[GFS2] Audit printk and kmalloc
fs/gfs2/locking/dlm/lock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include "lock_dlm.h"

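/*
 * Scratch value block used by the NL locks that hold_null_lock() creates
 * below; its contents are never examined, it only exists to satisfy
 * DLM_LKF_VALBLK on those requests.
 */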
static char junk_lvb[GDLM_LVB_SIZE];

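/*
 * Queue a lock on the lockspace's completion list and wake the lock_dlm
 * thread waiting on ls->thread_wait to process it.
 */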
static void queue_complete(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        clear_bit(LFL_ACTIVE, &lp->flags);

        spin_lock(&ls->async_lock);
        list_add_tail(&lp->clist, &ls->complete);
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
}

static inline void gdlm_ast(void *astarg)
{
        queue_complete((struct gdlm_lock *) astarg);
}

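/*
 * Blocking callback from the DLM.  The lock is queued on the blocking
 * list the first time; repeated basts are coalesced by keeping only the
 * largest requested mode.
 */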
static inline void gdlm_bast(void *astarg, int mode)
{
        struct gdlm_lock *lp = astarg;
        struct gdlm_ls *ls = lp->ls;

        if (!mode) {
                printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
                        lp->lockname.ln_type, lp->lockname.ln_number);
                return;
        }

        spin_lock(&ls->async_lock);
        if (!lp->bast_mode) {
                list_add_tail(&lp->blist, &ls->blocking);
                lp->bast_mode = mode;
        } else if (lp->bast_mode < mode)
                lp->bast_mode = mode;
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
}

void gdlm_queue_delayed(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        spin_lock(&ls->async_lock);
        list_add_tail(&lp->delay_list, &ls->delayed);
        spin_unlock(&ls->async_lock);
}

/* convert gfs lock-state to dlm lock-mode */

static int16_t make_mode(int16_t lmstate)
{
        switch (lmstate) {
        case LM_ST_UNLOCKED:
                return DLM_LOCK_NL;
        case LM_ST_EXCLUSIVE:
                return DLM_LOCK_EX;
        case LM_ST_DEFERRED:
                return DLM_LOCK_CW;
        case LM_ST_SHARED:
                return DLM_LOCK_PR;
        }
        gdlm_assert(0, "unknown LM state %d", lmstate);
        return -1;
}

/* convert dlm lock-mode to gfs lock-state */

int16_t gdlm_make_lmstate(int16_t dlmmode)
{
        switch (dlmmode) {
        case DLM_LOCK_IV:
        case DLM_LOCK_NL:
                return LM_ST_UNLOCKED;
        case DLM_LOCK_EX:
                return LM_ST_EXCLUSIVE;
        case DLM_LOCK_CW:
                return LM_ST_DEFERRED;
        case DLM_LOCK_PR:
                return LM_ST_SHARED;
        }
        gdlm_assert(0, "unknown DLM mode %d", dlmmode);
        return -1;
}

/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
        int16_t cur = make_mode(cur_state);
        if (lp->cur != DLM_LOCK_IV)
                gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}

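/*
 * Translate GFS lock flags into DLM request flags for a cur -> req
 * transition.  A non-zero sb_lkid means a lock already exists on the
 * resource, so the request becomes a conversion, and conversion-deadlock
 * resolution is delegated to the DLM (DLM_LKF_CONVDEADLK) unless the
 * request must not block or the promotion is being forced.
 */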
static inline unsigned int make_flags(struct gdlm_lock *lp,
                                      unsigned int gfs_flags,
                                      int16_t cur, int16_t req)
{
        unsigned int lkf = 0;

        if (gfs_flags & LM_FLAG_TRY)
                lkf |= DLM_LKF_NOQUEUE;

        if (gfs_flags & LM_FLAG_TRY_1CB) {
                lkf |= DLM_LKF_NOQUEUE;
                lkf |= DLM_LKF_NOQUEUEBAST;
        }

        if (gfs_flags & LM_FLAG_PRIORITY) {
                lkf |= DLM_LKF_NOORDER;
                lkf |= DLM_LKF_HEADQUE;
        }

        if (gfs_flags & LM_FLAG_ANY) {
                if (req == DLM_LOCK_PR)
                        lkf |= DLM_LKF_ALTCW;
                else if (req == DLM_LOCK_CW)
                        lkf |= DLM_LKF_ALTPR;
        }

        if (lp->lksb.sb_lkid != 0) {
                lkf |= DLM_LKF_CONVERT;

                /* Conversion deadlock avoidance by DLM */

                if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
                    !(lkf & DLM_LKF_NOQUEUE) &&
                    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
                        lkf |= DLM_LKF_CONVDEADLK;
        }

        if (lp->lvb)
                lkf |= DLM_LKF_VALBLK;

        return lkf;
}

/* make_strname - convert GFS lock numbers to a string */

static inline void make_strname(struct lm_lockname *lockname,
                                struct gdlm_strname *str)
{
        sprintf(str->name, "%8x%16llx", lockname->ln_type,
                lockname->ln_number);
        str->namelen = GDLM_STRNAME_BYTES;
}

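/*
 * Allocate and initialize a gdlm_lock.  New locks start out in
 * DLM_LOCK_IV (no lock exists) and are kept on the lockspace's all_locks
 * list so gdlm_release_all_locks() can find them at teardown.
 */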
int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
                   struct gdlm_lock **lpp)
{
        struct gdlm_lock *lp;

        lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
        if (!lp)
                return -ENOMEM;

        lp->lockname = *name;
        lp->ls = ls;
        lp->cur = DLM_LOCK_IV;
        lp->lvb = NULL;
        lp->hold_null = NULL;
        init_completion(&lp->ast_wait);
        INIT_LIST_HEAD(&lp->clist);
        INIT_LIST_HEAD(&lp->blist);
        INIT_LIST_HEAD(&lp->delay_list);

        spin_lock(&ls->async_lock);
        list_add(&lp->all_list, &ls->all_locks);
        ls->all_locks_count++;
        spin_unlock(&ls->async_lock);

        *lpp = lp;
        return 0;
}

void gdlm_delete_lp(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        spin_lock(&ls->async_lock);
        if (!list_empty(&lp->clist))
                list_del_init(&lp->clist);
        if (!list_empty(&lp->blist))
                list_del_init(&lp->blist);
        if (!list_empty(&lp->delay_list))
                list_del_init(&lp->delay_list);
        gdlm_assert(!list_empty(&lp->all_list),
                    "%x,%llx", lp->lockname.ln_type, lp->lockname.ln_number);
        list_del_init(&lp->all_list);
        ls->all_locks_count--;
        spin_unlock(&ls->async_lock);

        kfree(lp);
}

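/*
 * gdlm_get_lock/gdlm_put_lock are thin wrappers around the functions
 * above; presumably these are the entry points handed to GFS through the
 * lock module interface, hence the opaque lm_lockspace_t/lm_lock_t types.
 */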
int gdlm_get_lock(lm_lockspace_t *lockspace, struct lm_lockname *name,
                  lm_lock_t **lockp)
{
        struct gdlm_lock *lp;
        int error;

        error = gdlm_create_lp((struct gdlm_ls *) lockspace, name, &lp);

        *lockp = (lm_lock_t *) lp;
        return error;
}

void gdlm_put_lock(lm_lock_t *lock)
{
        gdlm_delete_lp((struct gdlm_lock *) lock);
}

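/*
 * Submit an asynchronous request to the DLM; the result is delivered
 * later through gdlm_ast()/gdlm_bast().  Returns LM_OUT_ASYNC both on
 * success and when the request is parked on the delayed list, and
 * LM_OUT_ERROR on an immediate failure.
 */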
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;
        struct gdlm_strname str;
        int error, bast = 1;

        /*
         * While recovery is in progress, park lock requests on the delayed
         * list so they can be submitted once recovery is done.  Requests
         * for recovery (NOEXP) and unlocks can pass.
         */

        if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
            !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
                gdlm_queue_delayed(lp);
                return LM_OUT_ASYNC;
        }

        /*
         * Submit the actual lock request.
         */

        if (test_bit(LFL_NOBAST, &lp->flags))
                bast = 0;

        make_strname(&lp->lockname, &str);

        set_bit(LFL_ACTIVE, &lp->flags);

        log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
                  lp->lockname.ln_number, lp->lksb.sb_lkid,
                  lp->cur, lp->req, lp->lkf);

        error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
                         str.name, str.namelen, 0, gdlm_ast, (void *) lp,
                         bast ? gdlm_bast : NULL);

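        /*
         * With DLM_LKF_NOQUEUE set, -EAGAIN simply means the try failed;
         * report it as a normal async completion with sb_status set
         * rather than as an error from the request itself.
         */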
        if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
                lp->lksb.sb_status = -EAGAIN;
                queue_complete(lp);
                error = 0;
        }

        if (error) {
                log_debug("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
                          "flags=%lx", ls->fsname, lp->lockname.ln_type,
                          lp->lockname.ln_number, error, lp->cur, lp->req,
                          lp->lkf, lp->flags);
                return LM_OUT_ERROR;
        }
        return LM_OUT_ASYNC;
}

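/*
 * Issue an asynchronous unlock.  LFL_DLM_UNLOCK marks the unlock as in
 * flight so gdlm_cancel() will leave it alone.
 */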
unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;
        unsigned int lkf = 0;
        int error;

        set_bit(LFL_DLM_UNLOCK, &lp->flags);
        set_bit(LFL_ACTIVE, &lp->flags);

        if (lp->lvb)
                lkf = DLM_LKF_VALBLK;

        log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
                  lp->lockname.ln_number, lp->lksb.sb_lkid, lp->cur, lkf);

        error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

        if (error) {
                log_debug("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
                          "flags=%lx", ls->fsname, lp->lockname.ln_type,
                          lp->lockname.ln_number, error, lp->cur, lp->req,
                          lp->lkf, lp->flags);
                return LM_OUT_ERROR;
        }
        return LM_OUT_ASYNC;
}

unsigned int gdlm_lock(lm_lock_t *lock, unsigned int cur_state,
                       unsigned int req_state, unsigned int flags)
{
        struct gdlm_lock *lp = (struct gdlm_lock *) lock;

        clear_bit(LFL_DLM_CANCEL, &lp->flags);
        if (flags & LM_FLAG_NOEXP)
                set_bit(LFL_NOBLOCK, &lp->flags);

        check_cur_state(lp, cur_state);
        lp->req = make_mode(req_state);
        lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

        return gdlm_do_lock(lp);
}

unsigned int gdlm_unlock(lm_lock_t *lock, unsigned int cur_state)
{
        struct gdlm_lock *lp = (struct gdlm_lock *) lock;

        clear_bit(LFL_DLM_CANCEL, &lp->flags);
        if (lp->cur == DLM_LOCK_IV)
                return 0;
        return gdlm_do_unlock(lp);
}

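/*
 * Cancel a pending request.  Three cases: the request is still on the
 * delayed list and never reached the DLM (complete it here with
 * LFL_CANCEL set), it is inactive or already being unlocked (nothing to
 * do), or it is blocked inside the DLM (cancel it with DLM_LKF_CANCEL;
 * on -EBUSY the cancel was not accepted, so the flag is cleared again).
 */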
void gdlm_cancel(lm_lock_t *lock)
{
        struct gdlm_lock *lp = (struct gdlm_lock *) lock;
        struct gdlm_ls *ls = lp->ls;
        int error, delay_list = 0;

        if (test_bit(LFL_DLM_CANCEL, &lp->flags))
                return;

        log_info("gdlm_cancel %x,%llx flags %lx",
                 lp->lockname.ln_type, lp->lockname.ln_number, lp->flags);

        spin_lock(&ls->async_lock);
        if (!list_empty(&lp->delay_list)) {
                list_del_init(&lp->delay_list);
                delay_list = 1;
        }
        spin_unlock(&ls->async_lock);

        if (delay_list) {
                set_bit(LFL_CANCEL, &lp->flags);
                set_bit(LFL_ACTIVE, &lp->flags);
                queue_complete(lp);
                return;
        }

        if (!test_bit(LFL_ACTIVE, &lp->flags) ||
            test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
                log_info("gdlm_cancel skip %x,%llx flags %lx",
                         lp->lockname.ln_type, lp->lockname.ln_number,
                         lp->flags);
                return;
        }

        /* the lock is blocked in the dlm */

        set_bit(LFL_DLM_CANCEL, &lp->flags);
        set_bit(LFL_ACTIVE, &lp->flags);

        error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
                           NULL, lp);

        log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
                 lp->lockname.ln_type, lp->lockname.ln_number, lp->flags);

        if (error == -EBUSY)
                clear_bit(LFL_DLM_CANCEL, &lp->flags);
}

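/*
 * Attach/detach a lock value block.  One GDLM_LVB_SIZE buffer is shared
 * between lp->lvb (handed to gfs) and lksb.sb_lvbptr (used by the DLM);
 * a non-NULL lp->lvb also makes make_flags() add DLM_LKF_VALBLK.
 */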
int gdlm_add_lvb(struct gdlm_lock *lp)
{
        char *lvb;

        lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
        if (!lvb)
                return -ENOMEM;

        lp->lksb.sb_lvbptr = lvb;
        lp->lvb = lvb;
        return 0;
}

void gdlm_del_lvb(struct gdlm_lock *lp)
{
        kfree(lp->lvb);
        lp->lvb = NULL;
        lp->lksb.sb_lvbptr = NULL;
}

/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
        struct gdlm_lock *lpn = NULL;
        int error;

        if (lp->hold_null) {
                printk(KERN_INFO "lock_dlm: lvb already held\n");
                return 0;
        }

        error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
        if (error)
                goto out;

        lpn->lksb.sb_lvbptr = junk_lvb;
        lpn->lvb = junk_lvb;

        lpn->req = DLM_LOCK_NL;
        lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
        set_bit(LFL_NOBAST, &lpn->flags);
        set_bit(LFL_INLOCK, &lpn->flags);

        init_completion(&lpn->ast_wait);
        gdlm_do_lock(lpn);
        wait_for_completion(&lpn->ast_wait);
        error = lpn->lksb.sb_status;
        if (error) {
                printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
                       error);
                gdlm_delete_lp(lpn);
                lpn = NULL;
        }
 out:
        lp->hold_null = lpn;
        return error;
}

/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs may call unhold_lvb() during a callback (from
   the context of a lock_dlm thread) which could cause a deadlock since the
   other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
        struct gdlm_lock *lpn = lp->hold_null;

        gdlm_assert(lpn, "%x,%llx",
                    lp->lockname.ln_type, lp->lockname.ln_number);
        lpn->lksb.sb_lvbptr = NULL;
        lpn->lvb = NULL;
        set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
        gdlm_do_unlock(lpn);
        lp->hold_null = NULL;
}

/* Acquire a NL lock because gfs requires the value block to remain intact on
   the resource while the lvb is "held", even if gfs is holding no other locks
   on the resource. */

int gdlm_hold_lvb(lm_lock_t *lock, char **lvbp)
{
        struct gdlm_lock *lp = (struct gdlm_lock *) lock;
        int error;

        error = gdlm_add_lvb(lp);
        if (error)
                return error;

        *lvbp = lp->lvb;

        error = hold_null_lock(lp);
        if (error)
                gdlm_del_lvb(lp);

        return error;
}

void gdlm_unhold_lvb(lm_lock_t *lock, char *lvb)
{
        struct gdlm_lock *lp = (struct gdlm_lock *) lock;

        unhold_null_lock(lp);
        gdlm_del_lvb(lp);
}

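/*
 * Push the lvb contents out to the resource by converting EX -> EX with
 * DLM_LKF_VALBLK (added by make_flags() since lp->lvb is non-NULL).  In
 * a DLM only EX/PW holders may update a value block, hence the early
 * return for any other mode.
 */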
void gdlm_sync_lvb(lm_lock_t *lock, char *lvb)
{
        struct gdlm_lock *lp = (struct gdlm_lock *) lock;

        if (lp->cur != DLM_LOCK_EX)
                return;

        init_completion(&lp->ast_wait);
        set_bit(LFL_SYNC_LVB, &lp->flags);

        lp->req = DLM_LOCK_EX;
        lp->lkf = make_flags(lp, 0, lp->cur, lp->req);

        gdlm_do_lock(lp);
        wait_for_completion(&lp->ast_wait);
}

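/*
 * Re-drive requests parked by gdlm_do_lock() during recovery: move the
 * whole delayed list onto the submit list and wake the lock_dlm thread.
 */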
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
        struct gdlm_lock *lp, *safe;

        spin_lock(&ls->async_lock);
        list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
                list_del_init(&lp->delay_list);
                list_add_tail(&lp->delay_list, &ls->submit);
        }
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
}

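/*
 * Throw away every remaining lock without talking to the DLM, presumably
 * as part of lockspace teardown.  Returns the number discarded so the
 * caller can report any stragglers.
 */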
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
        struct gdlm_lock *lp, *safe;
        int count = 0;

        spin_lock(&ls->async_lock);
        list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
                list_del_init(&lp->all_list);

                if (lp->lvb && lp->lvb != junk_lvb)
                        kfree(lp->lvb);
                kfree(lp);
                count++;
        }
        spin_unlock(&ls->async_lock);

        return count;
}
538