1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmrecovery.c
5  *
6  * dlm recovery - remastering locks after a node dies
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/timer.h>
41 #include <linux/kthread.h>
42 #include <linux/delay.h>
43
44
45 #include "cluster/heartbeat.h"
46 #include "cluster/nodemanager.h"
47 #include "cluster/tcp.h"
48
49 #include "dlmapi.h"
50 #include "dlmcommon.h"
51 #include "dlmdomain.h"
52
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
54 #include "cluster/masklog.h"
55
56 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
57
58 static int dlm_recovery_thread(void *data);
59 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
60 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
61 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
62 static int dlm_do_recovery(struct dlm_ctxt *dlm);
63
64 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
65 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
66 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
67 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
68                                  u8 request_from, u8 dead_node);
69 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
70
71 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
72 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
73                                         const char *lockname, int namelen,
74                                         int total_locks, u64 cookie,
75                                         u8 flags, u8 master);
76 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
77                                     struct dlm_migratable_lockres *mres,
78                                     u8 send_to,
79                                     struct dlm_lock_resource *res,
80                                     int total_locks);
81 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
82                                      struct dlm_lock_resource *res,
83                                      struct dlm_migratable_lockres *mres);
84 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
85 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
86                                  u8 dead_node, u8 send_to);
87 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
88 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
89                                         struct list_head *list, u8 dead_node);
90 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
91                                               u8 dead_node, u8 new_master);
92 static void dlm_reco_ast(void *astdata);
93 static void dlm_reco_bast(void *astdata, int blocked_type);
94 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
95 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
96                                          void *data);
97 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
98
99 static u64 dlm_get_next_mig_cookie(void);
100
101 static DEFINE_SPINLOCK(dlm_reco_state_lock);
102 static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
103 static u64 dlm_mig_cookie = 1;
104
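/* hand out the next migration cookie: the counter starts at 1, wraps from
 * ~0ULL back to 1, and never returns 0 */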
105 static u64 dlm_get_next_mig_cookie(void)
106 {
107         u64 c;
108         spin_lock(&dlm_mig_cookie_lock);
109         c = dlm_mig_cookie;
110         if (dlm_mig_cookie == (~0ULL))
111                 dlm_mig_cookie = 1;
112         else
113                 dlm_mig_cookie++;
114         spin_unlock(&dlm_mig_cookie_lock);
115         return c;
116 }
117
118 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
119                                           u8 dead_node)
120 {
121         assert_spin_locked(&dlm->spinlock);
122         if (dlm->reco.dead_node != dead_node)
123                 mlog(0, "%s: changing dead_node from %u to %u\n",
124                      dlm->name, dlm->reco.dead_node, dead_node);
125         dlm->reco.dead_node = dead_node;
126 }
127
128 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
129                                        u8 master)
130 {
131         assert_spin_locked(&dlm->spinlock);
132         mlog(0, "%s: changing new_master from %u to %u\n",
133              dlm->name, dlm->reco.new_master, master);
134         dlm->reco.new_master = master;
135 }
136
137 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
138 {
139         assert_spin_locked(&dlm->spinlock);
140         clear_bit(dlm->reco.dead_node, dlm->recovery_map);
141         dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
142         dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
143 }
144
145 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
146 {
147         spin_lock(&dlm->spinlock);
148         __dlm_reset_recovery(dlm);
149         spin_unlock(&dlm->spinlock);
150 }
151
152 /* Worker function used during recovery. */
153 void dlm_dispatch_work(void *data)
154 {
155         struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
156         LIST_HEAD(tmp_list);
157         struct list_head *iter, *iter2;
158         struct dlm_work_item *item;
159         dlm_workfunc_t *workfunc;
160         int tot=0;
161
162         if (!dlm_joined(dlm))
163                 return;
164
165         spin_lock(&dlm->work_lock);
166         list_splice_init(&dlm->work_list, &tmp_list);
167         spin_unlock(&dlm->work_lock);
168
169         list_for_each_safe(iter, iter2, &tmp_list) {
170                 tot++;
171         }
172         mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
173
174         list_for_each_safe(iter, iter2, &tmp_list) {
175                 item = list_entry(iter, struct dlm_work_item, list);
176                 workfunc = item->func;
177                 list_del_init(&item->list);
178
179                 /* already have ref on dlm to avoid having
180                  * it disappear.  just double-check. */
181                 BUG_ON(item->dlm != dlm);
182
183                 /* this is allowed to sleep and
184                  * call network stuff */
185                 workfunc(item, item->data);
186
187                 dlm_put(dlm);
188                 kfree(item);
189         }
190 }
191
192 /*
193  * RECOVERY THREAD
194  */
195
196 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
197 {
198         /* wake the recovery thread
199          * this will wake the reco thread in one of three places
200          * 1) sleeping with no recovery happening
201          * 2) sleeping with recovery mastered elsewhere
202          * 3) recovery mastered here, waiting on reco data */
203
204         wake_up(&dlm->dlm_reco_thread_wq);
205 }
206
207 /* Launch the recovery thread */
208 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
209 {
210         mlog(0, "starting dlm recovery thread...\n");
211
212         dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
213                                                 "dlm_reco_thread");
214         if (IS_ERR(dlm->dlm_reco_thread_task)) {
215                 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
216                 dlm->dlm_reco_thread_task = NULL;
217                 return -EINVAL;
218         }
219
220         return 0;
221 }
222
223 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
224 {
225         if (dlm->dlm_reco_thread_task) {
226                 mlog(0, "waiting for dlm recovery thread to exit\n");
227                 kthread_stop(dlm->dlm_reco_thread_task);
228                 dlm->dlm_reco_thread_task = NULL;
229         }
230 }
231
232
233
234 /*
235  * this is lame, but here's how recovery works...
236  * 1) all recovery threads cluster wide will work on recovering
237  *    ONE node at a time
238  * 2) negotiate who will take over all the locks for the dead node.
239  *    that's right... ALL the locks.
240  * 3) once a new master is chosen, everyone scans all locks
241  *    and moves aside those mastered by the dead guy
242  * 4) each of these locks should be locked until recovery is done
243  * 5) the new master collects up all of the secondary lock queue info
244  *    one lock at a time, forcing each node to communicate back
245  *    before continuing
246  * 6) each secondary lock queue responds with the full known lock info
247  * 7) once the new master has run all its locks, it sends an ALLDONE!
248  *    message to everyone
249  * 8) upon receiving this message, the secondary queue node unlocks
250  *    and responds to the ALLDONE
251  * 9) once the new master gets responses from everyone, it unlocks
252  *    everything and recovery for this dead node is done
253  *10) go back to 2) while there are still dead nodes
254  *
255  */
256
257 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
258 {
259         struct dlm_reco_node_data *ndata;
260         struct dlm_lock_resource *res;
261
262         mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
263              dlm->name, dlm->dlm_reco_thread_task->pid,
264              dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
265              dlm->reco.dead_node, dlm->reco.new_master);
266
267         list_for_each_entry(ndata, &dlm->reco.node_data, list) {
268                 char *st = "unknown";
269                 switch (ndata->state) {
270                         case DLM_RECO_NODE_DATA_INIT:
271                                 st = "init";
272                                 break;
273                         case DLM_RECO_NODE_DATA_REQUESTING:
274                                 st = "requesting";
275                                 break;
276                         case DLM_RECO_NODE_DATA_DEAD:
277                                 st = "dead";
278                                 break;
279                         case DLM_RECO_NODE_DATA_RECEIVING:
280                                 st = "receiving";
281                                 break;
282                         case DLM_RECO_NODE_DATA_REQUESTED:
283                                 st = "requested";
284                                 break;
285                         case DLM_RECO_NODE_DATA_DONE:
286                                 st = "done";
287                                 break;
288                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
289                                 st = "finalize-sent";
290                                 break;
291                         default:
292                                 st = "bad";
293                                 break;
294                 }
295                 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
296                      dlm->name, ndata->node_num, st);
297         }
298         list_for_each_entry(res, &dlm->reco.resources, recovering) {
299                 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
300                      dlm->name, res->lockname.len, res->lockname.name);
301         }
302 }
303
304 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
305
306 static int dlm_recovery_thread(void *data)
307 {
308         int status;
309         struct dlm_ctxt *dlm = data;
310         unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
311
312         mlog(0, "dlm recovery thread running for %s...\n", dlm->name);
313
314         while (!kthread_should_stop()) {
315                 if (dlm_joined(dlm)) {
316                         status = dlm_do_recovery(dlm);
317                         if (status == -EAGAIN) {
318                                 /* do not sleep, recheck immediately. */
319                                 continue;
320                         }
321                         if (status < 0)
322                                 mlog_errno(status);
323                 }
324
325                 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
326                                                  kthread_should_stop(),
327                                                  timeout);
328         }
329
330         mlog(0, "quitting DLM recovery thread\n");
331         return 0;
332 }
333
334 /* returns true when the recovery master has contacted us */
335 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
336 {
337         int ready;
338         spin_lock(&dlm->spinlock);
339         ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
340         spin_unlock(&dlm->spinlock);
341         return ready;
342 }
343
344 /* returns true if node is no longer in the domain
345  * could be dead or just not joined */
346 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
347 {
348         int dead;
349         spin_lock(&dlm->spinlock);
350         dead = !test_bit(node, dlm->domain_map);
351         spin_unlock(&dlm->spinlock);
352         return dead;
353 }
354
355 /* returns true once the node has been recovered, i.e. it is
356  * no longer set in the recovery map */
357 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
358 {
359         int recovered;
360         spin_lock(&dlm->spinlock);
361         recovered = !test_bit(node, dlm->recovery_map);
362         spin_unlock(&dlm->spinlock);
363         return recovered;
364 }
365
366
367 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
368 {
369         if (timeout) {
370                 mlog(ML_NOTICE, "%s: waiting %dms for notification of "
371                      "death of node %u\n", dlm->name, timeout, node);
372                 wait_event_timeout(dlm->dlm_reco_thread_wq,
373                            dlm_is_node_dead(dlm, node),
374                            msecs_to_jiffies(timeout));
375         } else {
376                 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
377                      "of death of node %u\n", dlm->name, node);
378                 wait_event(dlm->dlm_reco_thread_wq,
379                            dlm_is_node_dead(dlm, node));
380         }
381         /* for now, return 0 */
382         return 0;
383 }
384
385 int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
386 {
387         if (timeout) {
388                 mlog(0, "%s: waiting %dms for notification of "
389                      "recovery of node %u\n", dlm->name, timeout, node);
390                 wait_event_timeout(dlm->dlm_reco_thread_wq,
391                            dlm_is_node_recovered(dlm, node),
392                            msecs_to_jiffies(timeout));
393         } else {
394                 mlog(0, "%s: waiting indefinitely for notification "
395                      "of recovery of node %u\n", dlm->name, node);
396                 wait_event(dlm->dlm_reco_thread_wq,
397                            dlm_is_node_recovered(dlm, node));
398         }
399         /* for now, return 0 */
400         return 0;
401 }
402
403 /* callers of the top-level api calls (dlmlock/dlmunlock) should
404  * block on the dlm->reco.event when recovery is in progress.
405  * the dlm recovery thread will set this state when it begins
406  * recovering a dead node (as the new master or not) and clear
407  * the state and wake as soon as all affected lock resources have
408  * been marked with the RECOVERY flag */
409 static int dlm_in_recovery(struct dlm_ctxt *dlm)
410 {
411         int in_recovery;
412         spin_lock(&dlm->spinlock);
413         in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
414         spin_unlock(&dlm->spinlock);
415         return in_recovery;
416 }
417
418
419 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
420 {
421         if (dlm_in_recovery(dlm)) {
422                 mlog(0, "%s: reco thread %d in recovery: "
423                      "state=%d, master=%u, dead=%u\n",
424                      dlm->name, dlm->dlm_reco_thread_task->pid,
425                      dlm->reco.state, dlm->reco.new_master,
426                      dlm->reco.dead_node);
427         }
428         wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
429 }
430
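/* mark recovery active; callers of dlmlock/dlmunlock block in
 * dlm_wait_for_recovery() until dlm_end_recovery() clears the flag and
 * wakes dlm->reco.event */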
431 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
432 {
433         spin_lock(&dlm->spinlock);
434         BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
435         dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
436         spin_unlock(&dlm->spinlock);
437 }
438
439 static void dlm_end_recovery(struct dlm_ctxt *dlm)
440 {
441         spin_lock(&dlm->spinlock);
442         BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
443         dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
444         spin_unlock(&dlm->spinlock);
445         wake_up(&dlm->reco.event);
446 }
447
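/* one recovery pass: pick a dead node from the recovery map, make sure a
 * recovery master exists (electing one if needed), and remaster the dead
 * node's locks if this node is the master.  returns 0 to let the caller
 * sleep, or -EAGAIN to rescan immediately for more dead nodes. */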
448 static int dlm_do_recovery(struct dlm_ctxt *dlm)
449 {
450         int status = 0;
451         int ret;
452
453         spin_lock(&dlm->spinlock);
454
455         /* check to see if the new master has died */
456         if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
457             test_bit(dlm->reco.new_master, dlm->recovery_map)) {
458                 mlog(0, "new master %u died while recovering %u!\n",
459                      dlm->reco.new_master, dlm->reco.dead_node);
460                 /* unset the new_master, leave dead_node */
461                 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
462         }
463
464         /* select a target to recover */
465         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
466                 int bit;
467
468                 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
469                 if (bit >= O2NM_MAX_NODES || bit < 0)
470                         dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
471                 else
472                         dlm_set_reco_dead_node(dlm, bit);
473         } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
474                 /* BUG? */
475                 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
476                      dlm->reco.dead_node);
477                 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
478         }
479
480         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
481                 // mlog(0, "nothing to recover!  sleeping now!\n");
482                 spin_unlock(&dlm->spinlock);
483                 /* return to main thread loop and sleep. */
484                 return 0;
485         }
486         mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
487              dlm->name, dlm->dlm_reco_thread_task->pid,
488              dlm->reco.dead_node);
489         spin_unlock(&dlm->spinlock);
490
491         /* take write barrier */
492         /* (stops the list reshuffling thread, proxy ast handling) */
493         dlm_begin_recovery(dlm);
494
495         if (dlm->reco.new_master == dlm->node_num)
496                 goto master_here;
497
498         if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
499                 /* choose a new master, returns 0 if this node
500                  * is the master, -EEXIST if it's another node.
501                  * this does not return until a new master is chosen
502                  * or recovery completes entirely. */
503                 ret = dlm_pick_recovery_master(dlm);
504                 if (!ret) {
505                         /* already notified everyone.  go. */
506                         goto master_here;
507                 }
508                 mlog(0, "another node will master this recovery session.\n");
509         }
510         mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
511              dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
512              dlm->node_num, dlm->reco.dead_node);
513
514         /* it is safe to start everything back up here
515          * because all of the dead node's lock resources
516          * have been marked as in-recovery */
517         dlm_end_recovery(dlm);
518
519         /* sleep out in main dlm_recovery_thread loop. */
520         return 0;
521
522 master_here:
523         mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
524              dlm->dlm_reco_thread_task->pid,
525              dlm->name, dlm->reco.dead_node, dlm->node_num);
526
527         status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
528         if (status < 0) {
529                 /* we should never hit this anymore */
530                 mlog(ML_ERROR, "error %d remastering locks for node %u, "
531                      "retrying.\n", status, dlm->reco.dead_node);
532                 /* yield a bit to allow any final network messages
533                  * to get handled on remaining nodes */
534                 msleep(100);
535         } else {
536                 /* success!  see if any other nodes need recovery */
537                 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
538                      dlm->name, dlm->reco.dead_node, dlm->node_num);
539                 dlm_reset_recovery(dlm);
540         }
541         dlm_end_recovery(dlm);
542
543         /* continue and look for another dead node */
544         return -EAGAIN;
545 }
546
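/* as the recovery master: request the dead node's lock state from every
 * live node, wait until each one is DONE (or dies), then send the finalize
 * message and finish local lockres recovery */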
547 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
548 {
549         int status = 0;
550         struct dlm_reco_node_data *ndata;
551         struct list_head *iter;
552         int all_nodes_done;
553         int destroy = 0;
554         int pass = 0;
555
556         do {
557                 /* we have become recovery master.  there is no escaping
558                  * this, so just keep trying until we get it. */
559                 status = dlm_init_recovery_area(dlm, dead_node);
560                 if (status < 0) {
561                         mlog(ML_ERROR, "%s: failed to alloc recovery area, "
562                              "retrying\n", dlm->name);
563                         msleep(1000);
564                 }
565         } while (status != 0);
566
567         /* safe to access the node data list without a lock, since this
568          * process is the only one to change the list */
569         list_for_each(iter, &dlm->reco.node_data) {
570                 ndata = list_entry (iter, struct dlm_reco_node_data, list);
571                 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
572                 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
573
574                 mlog(0, "requesting lock info from node %u\n",
575                      ndata->node_num);
576
577                 if (ndata->node_num == dlm->node_num) {
578                         ndata->state = DLM_RECO_NODE_DATA_DONE;
579                         continue;
580                 }
581
582                 do {
583                         status = dlm_request_all_locks(dlm, ndata->node_num,
584                                                        dead_node);
585                         if (status < 0) {
586                                 mlog_errno(status);
587                                 if (dlm_is_host_down(status)) {
588                                         /* node died, ignore it for recovery */
589                                         status = 0;
590                                         ndata->state = DLM_RECO_NODE_DATA_DEAD;
591                                         /* wait for the domain map to catch up
592                                          * with the network state. */
593                                         wait_event_timeout(dlm->dlm_reco_thread_wq,
594                                                            dlm_is_node_dead(dlm,
595                                                                 ndata->node_num),
596                                                            msecs_to_jiffies(1000));
597                                         mlog(0, "waited 1 sec for %u, "
598                                              "dead? %s\n", ndata->node_num,
599                                              dlm_is_node_dead(dlm, ndata->node_num) ?
600                                              "yes" : "no");
601                                 } else {
602                                         /* -ENOMEM on the other node */
603                                         mlog(0, "%s: node %u returned "
604                                              "%d during recovery, retrying "
605                                              "after a short wait\n",
606                                              dlm->name, ndata->node_num,
607                                              status);
608                                         msleep(100);
609                                 }
610                         }
611                 } while (status != 0);
612
613                 switch (ndata->state) {
614                         case DLM_RECO_NODE_DATA_INIT:
615                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
616                         case DLM_RECO_NODE_DATA_REQUESTED:
617                                 BUG();
618                                 break;
619                         case DLM_RECO_NODE_DATA_DEAD:
620                                 mlog(0, "node %u died after requesting "
621                                      "recovery info for node %u\n",
622                                      ndata->node_num, dead_node);
623                                 /* fine.  don't need this node's info.
624                                  * continue without it. */
625                                 break;
626                         case DLM_RECO_NODE_DATA_REQUESTING:
627                                 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
628                                 mlog(0, "now receiving recovery data from "
629                                      "node %u for dead node %u\n",
630                                      ndata->node_num, dead_node);
631                                 break;
632                         case DLM_RECO_NODE_DATA_RECEIVING:
633                                 mlog(0, "already receiving recovery data from "
634                                      "node %u for dead node %u\n",
635                                      ndata->node_num, dead_node);
636                                 break;
637                         case DLM_RECO_NODE_DATA_DONE:
638                                 mlog(0, "already DONE receiving recovery data "
639                                      "from node %u for dead node %u\n",
640                                      ndata->node_num, dead_node);
641                                 break;
642                 }
643         }
644
645         mlog(0, "done requesting all lock info\n");
646
647         /* nodes should be sending reco data now
648          * just need to wait */
649
650         while (1) {
651                 /* check all the nodes now to see if we are
652                  * done, or if anyone died */
653                 all_nodes_done = 1;
654                 spin_lock(&dlm_reco_state_lock);
655                 list_for_each(iter, &dlm->reco.node_data) {
656                         ndata = list_entry (iter, struct dlm_reco_node_data, list);
657
658                         mlog(0, "checking recovery state of node %u\n",
659                              ndata->node_num);
660                         switch (ndata->state) {
661                                 case DLM_RECO_NODE_DATA_INIT:
662                                 case DLM_RECO_NODE_DATA_REQUESTING:
663                                         mlog(ML_ERROR, "bad ndata state for "
664                                              "node %u: state=%d\n",
665                                              ndata->node_num, ndata->state);
666                                         BUG();
667                                         break;
668                                 case DLM_RECO_NODE_DATA_DEAD:
669                                         mlog(0, "node %u died after "
670                                              "requesting recovery info for "
671                                              "node %u\n", ndata->node_num,
672                                              dead_node);
673                                         break;
674                                 case DLM_RECO_NODE_DATA_RECEIVING:
675                                 case DLM_RECO_NODE_DATA_REQUESTED:
676                                         mlog(0, "%s: node %u still in state %s\n",
677                                              dlm->name, ndata->node_num,
678                                              ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
679                                              "receiving" : "requested");
680                                         all_nodes_done = 0;
681                                         break;
682                                 case DLM_RECO_NODE_DATA_DONE:
683                                         mlog(0, "%s: node %u state is done\n",
684                                              dlm->name, ndata->node_num);
685                                         break;
686                                 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
687                                         mlog(0, "%s: node %u state is finalize\n",
688                                              dlm->name, ndata->node_num);
689                                         break;
690                         }
691                 }
692                 spin_unlock(&dlm_reco_state_lock);
693
694                 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
695                      all_nodes_done?"yes":"no");
696                 if (all_nodes_done) {
697                         int ret;
698
699                         /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
700                          * just send a finalize message to everyone and
701                          * clean up */
702                         mlog(0, "all nodes are done! send finalize\n");
703                         ret = dlm_send_finalize_reco_message(dlm);
704                         if (ret < 0)
705                                 mlog_errno(ret);
706
707                         spin_lock(&dlm->spinlock);
708                         dlm_finish_local_lockres_recovery(dlm, dead_node,
709                                                           dlm->node_num);
710                         spin_unlock(&dlm->spinlock);
711                         mlog(0, "should be done with recovery!\n");
712
713                         mlog(0, "finishing recovery of %s at %lu, "
714                              "dead=%u, this=%u, new=%u\n", dlm->name,
715                              jiffies, dlm->reco.dead_node,
716                              dlm->node_num, dlm->reco.new_master);
717                         destroy = 1;
718                         status = 0;
719                         /* rescan everything marked dirty along the way */
720                         dlm_kick_thread(dlm, NULL);
721                         break;
722                 }
723                 /* wait to be signalled, with periodic timeout
724                  * to check for node death */
725                 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
726                                          kthread_should_stop(),
727                                          msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
728
729         }
730
731         if (destroy)
732                 dlm_destroy_recovery_area(dlm, dead_node);
733
734         mlog_exit(status);
735         return status;
736 }
737
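/* build dlm->reco.node_data with one entry (in the INIT state) for each
 * node currently in the domain map; the dead node must already have been
 * removed from that map */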
738 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
739 {
740         int num=0;
741         struct dlm_reco_node_data *ndata;
742
743         spin_lock(&dlm->spinlock);
744         memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
745         /* nodes can only be removed (by dying) after dropping
746          * this lock, and death will be trapped later, so this should do */
747         spin_unlock(&dlm->spinlock);
748
749         while (1) {
750                 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
751                 if (num >= O2NM_MAX_NODES) {
752                         break;
753                 }
754                 BUG_ON(num == dead_node);
755
756                 ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS);
757                 if (!ndata) {
758                         dlm_destroy_recovery_area(dlm, dead_node);
759                         return -ENOMEM;
760                 }
761                 ndata->node_num = num;
762                 ndata->state = DLM_RECO_NODE_DATA_INIT;
763                 spin_lock(&dlm_reco_state_lock);
764                 list_add_tail(&ndata->list, &dlm->reco.node_data);
765                 spin_unlock(&dlm_reco_state_lock);
766                 num++;
767         }
768
769         return 0;
770 }
771
772 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
773 {
774         struct list_head *iter, *iter2;
775         struct dlm_reco_node_data *ndata;
776         LIST_HEAD(tmplist);
777
778         spin_lock(&dlm_reco_state_lock);
779         list_splice_init(&dlm->reco.node_data, &tmplist);
780         spin_unlock(&dlm_reco_state_lock);
781
782         list_for_each_safe(iter, iter2, &tmplist) {
783                 ndata = list_entry (iter, struct dlm_reco_node_data, list);
784                 list_del_init(&ndata->list);
785                 kfree(ndata);
786         }
787 }
788
789 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
790                                  u8 dead_node)
791 {
792         struct dlm_lock_request lr;
793         enum dlm_status ret;
794
795         mlog(0, "\n");
796
797
798         mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
799                   "to %u\n", dead_node, request_from);
800
801         memset(&lr, 0, sizeof(lr));
802         lr.node_idx = dlm->node_num;
803         lr.dead_node = dead_node;
804
805         // send message
806         ret = DLM_NOLOCKMGR;
807         ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
808                                  &lr, sizeof(lr), request_from, NULL);
809
810         /* negative status is handled by caller */
811         if (ret < 0)
812                 mlog_errno(ret);
813
814         // return from here, then
815         // sleep until all received or error
816         return ret;
817
818 }
819
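/* handler for DLM_LOCK_REQUEST_MSG: queue a work item so that dumping our
 * lock state to the recovery master happens in the dlm worker thread, not
 * in the network handler */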
820 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
821 {
822         struct dlm_ctxt *dlm = data;
823         struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
824         char *buf = NULL;
825         struct dlm_work_item *item = NULL;
826
827         if (!dlm_grab(dlm))
828                 return -EINVAL;
829
830         if (lr->dead_node != dlm->reco.dead_node) {
831                 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
832                      "dead_node is %u\n", dlm->name, lr->node_idx,
833                      lr->dead_node, dlm->reco.dead_node);
834                 dlm_print_reco_node_status(dlm);
835                 /* this is a hack */
836                 dlm_put(dlm);
837                 return -ENOMEM;
838         }
839         BUG_ON(lr->dead_node != dlm->reco.dead_node);
840
841         item = kcalloc(1, sizeof(*item), GFP_NOFS);
842         if (!item) {
843                 dlm_put(dlm);
844                 return -ENOMEM;
845         }
846
847         /* this will get freed by dlm_request_all_locks_worker */
848         buf = (char *) __get_free_page(GFP_NOFS);
849         if (!buf) {
850                 kfree(item);
851                 dlm_put(dlm);
852                 return -ENOMEM;
853         }
854
855         /* queue up work for dlm_request_all_locks_worker */
856         dlm_grab(dlm);  /* get an extra ref for the work item */
857         dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
858         item->u.ral.reco_master = lr->node_idx;
859         item->u.ral.dead_node = lr->dead_node;
860         spin_lock(&dlm->work_lock);
861         list_add_tail(&item->list, &dlm->work_list);
862         spin_unlock(&dlm->work_lock);
863         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
864
865         dlm_put(dlm);
866         return 0;
867 }
868
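/* worker side of the lock request: send every lockres owned by the dead
 * node (or with an UNKNOWN owner) to the recovery master, then send the
 * all-done message unless the master went down along the way */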
869 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
870 {
871         struct dlm_migratable_lockres *mres;
872         struct dlm_lock_resource *res;
873         struct dlm_ctxt *dlm;
874         LIST_HEAD(resources);
875         struct list_head *iter;
876         int ret;
877         u8 dead_node, reco_master;
878         int skip_all_done = 0;
879
880         dlm = item->dlm;
881         dead_node = item->u.ral.dead_node;
882         reco_master = item->u.ral.reco_master;
883         mres = (struct dlm_migratable_lockres *)data;
884
885         mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
886              dlm->name, dead_node, reco_master);
887
888         if (dead_node != dlm->reco.dead_node ||
889             reco_master != dlm->reco.new_master) {
890                 /* worker could have been created before the recovery master
891                  * died.  if so, do not continue, but do not error. */
892                 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
893                         mlog(ML_NOTICE, "%s: will not send recovery state, "
894                              "recovery master %u died, thread=(dead=%u,mas=%u)"
895                              " current=(dead=%u,mas=%u)\n", dlm->name,
896                              reco_master, dead_node, reco_master,
897                              dlm->reco.dead_node, dlm->reco.new_master);
898                 } else {
899                         mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
900                              "master=%u), request(dead=%u, master=%u)\n",
901                              dlm->name, dlm->reco.dead_node,
902                              dlm->reco.new_master, dead_node, reco_master);
903                 }
904                 goto leave;
905         }
906
907         /* lock resources should have already been moved to the
908          * dlm->reco.resources list.  now move items from that list
909          * to a temp list if the dead owner matches.  note that the
910          * whole cluster recovers only one node at a time, so we
911          * can safely move UNKNOWN lock resources for each recovery
912          * session. */
913         dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
914
915         /* now we can begin blasting lockreses without the dlm lock */
916
917         /* any errors returned will be due to the new_master dying,
918          * the dlm_reco_thread should detect this */
919         list_for_each(iter, &resources) {
920                 res = list_entry (iter, struct dlm_lock_resource, recovering);
921                 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
922                                         DLM_MRES_RECOVERY);
923                 if (ret < 0) {
924                         mlog(ML_ERROR, "%s: node %u went down while sending "
925                              "recovery state for dead node %u, ret=%d\n", dlm->name,
926                              reco_master, dead_node, ret);
927                         skip_all_done = 1;
928                         break;
929                 }
930         }
931
932         /* move the resources back to the list */
933         spin_lock(&dlm->spinlock);
934         list_splice_init(&resources, &dlm->reco.resources);
935         spin_unlock(&dlm->spinlock);
936
937         if (!skip_all_done) {
938                 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
939                 if (ret < 0) {
940                         mlog(ML_ERROR, "%s: node %u went down while sending "
941                              "recovery all-done for dead node %u, ret=%d\n",
942                              dlm->name, reco_master, dead_node, ret);
943                 }
944         }
945 leave:
946         free_page((unsigned long)data);
947 }
948
949
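/* tell the recovery master that this node has sent all of its lock state
 * for the dead node */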
950 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
951 {
952         int ret, tmpret;
953         struct dlm_reco_data_done done_msg;
954
955         memset(&done_msg, 0, sizeof(done_msg));
956         done_msg.node_idx = dlm->node_num;
957         done_msg.dead_node = dead_node;
958         mlog(0, "sending DATA DONE message to %u, "
959              "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
960              done_msg.dead_node);
961
962         ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
963                                  sizeof(done_msg), send_to, &tmpret);
964         if (ret < 0) {
965                 if (!dlm_is_host_down(ret)) {
966                         mlog_errno(ret);
967                         mlog(ML_ERROR, "%s: unknown error sending data-done "
968                              "to %u\n", dlm->name, send_to);
969                         BUG();
970                 }
971         } else
972                 ret = tmpret;
973         return ret;
974 }
975
976
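/* handler for DLM_RECO_DATA_DONE_MSG: mark the sending node's recovery
 * state as DONE and kick the recovery thread */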
977 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
978 {
979         struct dlm_ctxt *dlm = data;
980         struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
981         struct list_head *iter;
982         struct dlm_reco_node_data *ndata = NULL;
983         int ret = -EINVAL;
984
985         if (!dlm_grab(dlm))
986                 return -EINVAL;
987
988         mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
989              "node_idx=%u, this node=%u\n", done->dead_node,
990              dlm->reco.dead_node, done->node_idx, dlm->node_num);
991
992         mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
993                         "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
994                         "node_idx=%u, this node=%u\n", done->dead_node,
995                         dlm->reco.dead_node, done->node_idx, dlm->node_num);
996
997         spin_lock(&dlm_reco_state_lock);
998         list_for_each(iter, &dlm->reco.node_data) {
999                 ndata = list_entry (iter, struct dlm_reco_node_data, list);
1000                 if (ndata->node_num != done->node_idx)
1001                         continue;
1002
1003                 switch (ndata->state) {
1004                         /* should have moved beyond INIT but not to FINALIZE yet */
1005                         case DLM_RECO_NODE_DATA_INIT:
1006                         case DLM_RECO_NODE_DATA_DEAD:
1007                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
1008                                 mlog(ML_ERROR, "bad ndata state for node %u:"
1009                                      " state=%d\n", ndata->node_num,
1010                                      ndata->state);
1011                                 BUG();
1012                                 break;
1013                         /* these states are possible at this point, anywhere along
1014                          * the line of recovery */
1015                         case DLM_RECO_NODE_DATA_DONE:
1016                         case DLM_RECO_NODE_DATA_RECEIVING:
1017                         case DLM_RECO_NODE_DATA_REQUESTED:
1018                         case DLM_RECO_NODE_DATA_REQUESTING:
1019                                 mlog(0, "node %u is DONE sending "
1020                                           "recovery data!\n",
1021                                           ndata->node_num);
1022
1023                                 ndata->state = DLM_RECO_NODE_DATA_DONE;
1024                                 ret = 0;
1025                                 break;
1026                 }
1027         }
1028         spin_unlock(&dlm_reco_state_lock);
1029
1030         /* wake the recovery thread, some node is done */
1031         if (!ret)
1032                 dlm_kick_recovery_thread(dlm);
1033
1034         if (ret < 0)
1035                 mlog(ML_ERROR, "failed to find recovery node data for node "
1036                      "%u\n", done->node_idx);
1037         dlm_put(dlm);
1038
1039         mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1040         return ret;
1041 }
1042
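/* move lockreses owned by dead_node (or with an UNKNOWN owner) from
 * dlm->reco.resources onto the caller's list, dropping any granted
 * $RECOVERY lock the dead node still held along the way */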
1043 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1044                                         struct list_head *list,
1045                                         u8 dead_node)
1046 {
1047         struct dlm_lock_resource *res;
1048         struct list_head *iter, *iter2;
1049         struct dlm_lock *lock;
1050
1051         spin_lock(&dlm->spinlock);
1052         list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1053                 res = list_entry (iter, struct dlm_lock_resource, recovering);
1054                 /* always prune any $RECOVERY entries for dead nodes,
1055                  * otherwise hangs can occur during later recovery */
1056                 if (dlm_is_recovery_lock(res->lockname.name,
1057                                          res->lockname.len)) {
1058                         spin_lock(&res->spinlock);
1059                         list_for_each_entry(lock, &res->granted, list) {
1060                                 if (lock->ml.node == dead_node) {
1061                                         mlog(0, "AHA! there was "
1062                                              "a $RECOVERY lock for dead "
1063                                              "node %u (%s)!\n", 
1064                                              dead_node, dlm->name);
1065                                         list_del_init(&lock->list);
1066                                         dlm_lock_put(lock);
1067                                         break;
1068                                 }
1069                         }
1070                         spin_unlock(&res->spinlock);
1071                         continue;
1072                 }
1073
1074                 if (res->owner == dead_node) {
1075                         mlog(0, "found lockres owned by dead node while "
1076                                   "doing recovery for node %u. sending it.\n",
1077                                   dead_node);
1078                         list_move_tail(&res->recovering, list);
1079                 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1080                         mlog(0, "found UNKNOWN owner while doing recovery "
1081                                   "for node %u. sending it.\n", dead_node);
1082                         list_move_tail(&res->recovering, list);
1083                 }
1084         }
1085         spin_unlock(&dlm->spinlock);
1086 }
1087
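/* count the locks on all three queues; the queue++ walk relies on the
 * granted, converting and blocked list heads being adjacent in
 * struct dlm_lock_resource */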
1088 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1089 {
1090         int total_locks = 0;
1091         struct list_head *iter, *queue = &res->granted;
1092         int i;
1093
1094         for (i=0; i<3; i++) {
1095                 list_for_each(iter, queue)
1096                         total_locks++;
1097                 queue++;
1098         }
1099         return total_locks;
1100 }
1101
1102
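/* send the locks currently packed into mres to send_to, then reinit mres
 * so the caller can keep filling it; the message covering the last lock
 * carries the DLM_MRES_ALL_DONE flag */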
1103 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1104                                       struct dlm_migratable_lockres *mres,
1105                                       u8 send_to,
1106                                       struct dlm_lock_resource *res,
1107                                       int total_locks)
1108 {
1109         u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1110         int mres_total_locks = be32_to_cpu(mres->total_locks);
1111         int sz, ret = 0, status = 0;
1112         u8 orig_flags = mres->flags,
1113            orig_master = mres->master;
1114
1115         BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1116         if (!mres->num_locks)
1117                 return 0;
1118
1119         sz = sizeof(struct dlm_migratable_lockres) +
1120                 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1121
1122         /* add an all-done flag if we reached the last lock */
1123         orig_flags = mres->flags;
1124         BUG_ON(total_locks > mres_total_locks);
1125         if (total_locks == mres_total_locks)
1126                 mres->flags |= DLM_MRES_ALL_DONE;
1127
1128         /* send it */
1129         ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1130                                  sz, send_to, &status);
1131         if (ret < 0) {
1132                 /* XXX: negative status is not handled.
1133                  * this will end up killing this node. */
1134                 mlog_errno(ret);
1135         } else {
1136                 /* might get an -ENOMEM back here */
1137                 ret = status;
1138                 if (ret < 0) {
1139                         mlog_errno(ret);
1140
1141                         if (ret == -EFAULT) {
1142                                 mlog(ML_ERROR, "node %u told me to kill "
1143                                      "myself!\n", send_to);
1144                                 BUG();
1145                         }
1146                 }
1147         }
1148
1149         /* zero and reinit the message buffer */
1150         dlm_init_migratable_lockres(mres, res->lockname.name,
1151                                     res->lockname.len, mres_total_locks,
1152                                     mig_cookie, orig_flags, orig_master);
1153         return ret;
1154 }
1155
1156 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1157                                         const char *lockname, int namelen,
1158                                         int total_locks, u64 cookie,
1159                                         u8 flags, u8 master)
1160 {
1161         /* mres here is one full page */
1162         memset(mres, 0, PAGE_SIZE);
1163         mres->lockname_len = namelen;
1164         memcpy(mres->lockname, lockname, namelen);
1165         mres->num_locks = 0;
1166         mres->total_locks = cpu_to_be32(total_locks);
1167         mres->mig_cookie = cpu_to_be64(cookie);
1168         mres->flags = flags;
1169         mres->master = master;
1170 }
1171
1172
1173 /* returns 1 if this lock fills the network structure,
1174  * 0 otherwise */
1175 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1176                                  struct dlm_migratable_lockres *mres, int queue)
1177 {
1178         struct dlm_migratable_lock *ml;
1179         int lock_num = mres->num_locks;
1180
1181         ml = &(mres->ml[lock_num]);
1182         ml->cookie = lock->ml.cookie;
1183         ml->type = lock->ml.type;
1184         ml->convert_type = lock->ml.convert_type;
1185         ml->highest_blocked = lock->ml.highest_blocked;
1186         ml->list = queue;
1187         if (lock->lksb) {
1188                 ml->flags = lock->lksb->flags;
1189                 /* send our current lvb */
1190                 if (ml->type == LKM_EXMODE ||
1191                     ml->type == LKM_PRMODE) {
1192                         /* if it is already set, this had better be a PR
1193                          * and it has to match */
1194                         if (!dlm_lvb_is_empty(mres->lvb) &&
1195                             (ml->type == LKM_EXMODE ||
1196                              memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1197                                 mlog(ML_ERROR, "mismatched lvbs!\n");
1198                                 __dlm_print_one_lock_resource(lock->lockres);
1199                                 BUG();
1200                         }
1201                         memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1202                 }
1203         }
1204         ml->node = lock->ml.node;
1205         mres->num_locks++;
1206         /* we reached the max, send this network message */
1207         if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1208                 return 1;
1209         return 0;
1210 }
1211
1212
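/* pack every lock on the resource into mres, sending a full message each
 * time DLM_MAX_MIGRATABLE_LOCKS is reached, and flush whatever remains at
 * the end */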
1213 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1214                          struct dlm_migratable_lockres *mres,
1215                          u8 send_to, u8 flags)
1216 {
1217         struct list_head *queue, *iter;
1218         int total_locks, i;
1219         u64 mig_cookie = 0;
1220         struct dlm_lock *lock;
1221         int ret = 0;
1222
1223         BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1224
1225         mlog(0, "sending to %u\n", send_to);
1226
1227         total_locks = dlm_num_locks_in_lockres(res);
1228         if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1229                 /* rare, but possible */
1230                 mlog(0, "argh.  lockres has %d locks.  this will "
1231                           "require more than one network packet to "
1232                           "migrate\n", total_locks);
1233                 mig_cookie = dlm_get_next_mig_cookie();
1234         }
1235
1236         dlm_init_migratable_lockres(mres, res->lockname.name,
1237                                     res->lockname.len, total_locks,
1238                                     mig_cookie, flags, res->owner);
1239
1240         total_locks = 0;
1241         for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1242                 queue = dlm_list_idx_to_ptr(res, i);
1243                 list_for_each(iter, queue) {
1244                         lock = list_entry (iter, struct dlm_lock, list);
1245
1246                         /* add another lock. */
1247                         total_locks++;
1248                         if (!dlm_add_lock_to_array(lock, mres, i))
1249                                 continue;
1250
1251                         /* this filled the lock message,
1252                          * we must send it immediately. */
1253                         ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1254                                                        res, total_locks);
1255                         if (ret < 0)
1256                                 goto error;
1257                 }
1258         }
1259         /* flush any remaining locks */
1260         ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1261         if (ret < 0)
1262                 goto error;
1263         return ret;
1264
1265 error:
1266         mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1267              dlm->name, ret);
1268         if (!dlm_is_host_down(ret))
1269                 BUG();
1270         mlog(0, "%s: node %u went down while sending %s "
1271              "lockres %.*s\n", dlm->name, send_to,
1272              flags & DLM_MRES_RECOVERY ?  "recovery" : "migration",
1273              res->lockname.len, res->lockname.name);
1274         return ret;
1275 }
1276
1277
1278
1279 /*
1280  * this message will contain no more than one page worth of
1281  * recovery data, and it will work on only one lockres.
1282  * there may be many locks in this page, and we may need to wait
1283  * for additional packets to complete all the locks (rare, but
1284  * possible).
1285  */
1286 /*
1287  * NOTE: the allocation error cases here are scary
1288  * we really cannot afford to fail an alloc in recovery
1289  * do we spin?  returning an error only delays the problem really
1290  */
1291
1292 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1293 {
1294         struct dlm_ctxt *dlm = data;
1295         struct dlm_migratable_lockres *mres =
1296                 (struct dlm_migratable_lockres *)msg->buf;
1297         int ret = 0;
1298         u8 real_master;
1299         char *buf = NULL;
1300         struct dlm_work_item *item = NULL;
1301         struct dlm_lock_resource *res = NULL;
1302
1303         if (!dlm_grab(dlm))
1304                 return -EINVAL;
1305
1306         BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1307
1308         real_master = mres->master;
1309         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1310                 /* cannot migrate a lockres with no master */
1311                 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1312         }
1313
1314         mlog(0, "%s message received from node %u\n",
1315                   (mres->flags & DLM_MRES_RECOVERY) ?
1316                   "recovery" : "migration", mres->master);
1317         if (mres->flags & DLM_MRES_ALL_DONE)
1318                 mlog(0, "all done flag.  all lockres data received!\n");
1319
1320         ret = -ENOMEM;
1321         buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1322                 item = kzalloc(sizeof(*item), GFP_NOFS);
1323         if (!buf || !item)
1324                 goto leave;
1325
1326         /* lookup the lock to see if we have a secondary queue for this
1327          * already...  just add the locks in and this will have its owner
1328          * and RECOVERY flag changed when it completes. */
1329         res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1330         if (res) {
1331                 /* dlm_lookup_lockres() already took a ref on res;
1332                  * just mark it as recovering/migrating */
1333                 spin_lock(&res->spinlock);
1334                 if (mres->flags & DLM_MRES_RECOVERY) {
1335                         res->state |= DLM_LOCK_RES_RECOVERING;
1336                 } else {
1337                         if (res->state & DLM_LOCK_RES_MIGRATING) {
1338                                 /* this is at least the second
1339                                  * lockres message */
1340                                 mlog(0, "lock %.*s is already migrating\n",
1341                                           mres->lockname_len,
1342                                           mres->lockname);
1343                         } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1344                                 /* caller should BUG */
1345                                 mlog(ML_ERROR, "node is attempting to migrate "
1346                                      "lock %.*s, but marked as recovering!\n",
1347                                      mres->lockname_len, mres->lockname);
1348                                 ret = -EFAULT;
1349                                 spin_unlock(&res->spinlock);
1350                                 goto leave;
1351                         }
1352                         res->state |= DLM_LOCK_RES_MIGRATING;
1353                 }
1354                 spin_unlock(&res->spinlock);
1355         } else {
1356                 /* need to allocate, just as if it were
1357                  * mastered here normally */
1358                 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1359                 if (!res)
1360                         goto leave;
1361
1362                 /* to match the ref that we would have gotten if
1363                  * dlm_lookup_lockres had succeeded */
1364                 dlm_lockres_get(res);
1365
1366                 /* mark it as recovering/migrating and hash it */
1367                 if (mres->flags & DLM_MRES_RECOVERY)
1368                         res->state |= DLM_LOCK_RES_RECOVERING;
1369                 else
1370                         res->state |= DLM_LOCK_RES_MIGRATING;
1371
1372                 spin_lock(&dlm->spinlock);
1373                 __dlm_insert_lockres(dlm, res);
1374                 spin_unlock(&dlm->spinlock);
1375
1376                 /* now that the new lockres is inserted,
1377                  * make it usable by other processes */
1378                 spin_lock(&res->spinlock);
1379                 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1380                 spin_unlock(&res->spinlock);
1381
1382                 /* add an extra ref for the just-allocated lockres,
1383                  * otherwise the lockres will be purged immediately */
1384                 dlm_lockres_get(res);
1385
1386         }
1387
1388         /* at this point we have allocated everything we need,
1389          * and we have a hashed lockres with an extra ref and
1390          * the proper res->state flags. */
1391         ret = 0;
1392         if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1393                 /* migration cannot have an unknown master */
1394                 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1395                 mlog(0, "recovery has passed me a lockres with an "
1396                           "unknown owner; will need to requery: "
1397                           "%.*s\n", mres->lockname_len, mres->lockname);
1398         } else {
1399                 spin_lock(&res->spinlock);
1400                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1401                 spin_unlock(&res->spinlock);
1402         }
1403
1404         /* queue up work for dlm_mig_lockres_worker */
1405         dlm_grab(dlm);  /* get an extra ref for the work item */
1406         memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
1407         dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1408         item->u.ml.lockres = res; /* already have a ref */
1409         item->u.ml.real_master = real_master;
1410         spin_lock(&dlm->work_lock);
1411         list_add_tail(&item->list, &dlm->work_list);
1412         spin_unlock(&dlm->work_lock);
1413         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1414
1415 leave:
1416         dlm_put(dlm);
1417         if (ret < 0) {
1418                 kfree(buf);
1419                 kfree(item);
1422         }
1423
1424         mlog_exit(ret);
1425         return ret;
1426 }
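/*
 * Note on the handler above: the payload is copied out of msg->buf and
 * attached to a dlm_work_item instead of being processed in place, and
 * the real work (a possible master requery over the network, then
 * rebuilding the lock queues) is deferred to dlm_mig_lockres_worker()
 * on dlm->dlm_worker.  Presumably this keeps the o2net receive context
 * from blocking on further network round trips, and it is why an extra
 * dlm_grab() is taken on behalf of the work item.
 */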
1427
1428
1429 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1430 {
1431         struct dlm_ctxt *dlm = data;
1432         struct dlm_migratable_lockres *mres;
1433         int ret = 0;
1434         struct dlm_lock_resource *res;
1435         u8 real_master;
1436
1437         dlm = item->dlm;
1438         mres = (struct dlm_migratable_lockres *)data;
1439
1440         res = item->u.ml.lockres;
1441         real_master = item->u.ml.real_master;
1442
1443         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1444                 /* this case is super-rare. only occurs if
1445                  * node death happens during migration. */
1446 again:
1447                 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1448                 if (ret < 0) {
1449                         mlog(0, "dlm_lockres_master_requery ret=%d\n",
1450                                   ret);
1451                         goto again;
1452                 }
1453                 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1454                         mlog(0, "lockres %.*s not claimed.  "
1455                                    "this node will take it.\n",
1456                                    res->lockname.len, res->lockname.name);
1457                 } else {
1458                         mlog(0, "master needs to respond to sender "
1459                                   "that node %u still owns %.*s\n",
1460                                   real_master, res->lockname.len,
1461                                   res->lockname.name);
1462                         /* cannot touch this lockres */
1463                         goto leave;
1464                 }
1465         }
1466
1467         ret = dlm_process_recovery_data(dlm, res, mres);
1468         if (ret < 0)
1469                 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1470         else
1471                 mlog(0, "dlm_process_recovery_data succeeded\n");
1472
1473         if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1474                            (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1475                 ret = dlm_finish_migration(dlm, res, mres->master);
1476                 if (ret < 0)
1477                         mlog_errno(ret);
1478         }
1479
1480 leave:
1481         kfree(data);
1482         mlog_exit(ret);
1483 }
1484
1485
1486
1487 int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1488                                struct dlm_lock_resource *res, u8 *real_master)
1489 {
1490         struct dlm_node_iter iter;
1491         int nodenum;
1492         int ret = 0;
1493
1494         *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1495
1496         /* we only reach here if one of the two nodes in a
1497          * migration died while the migration was in progress.
1498          * at this point we need to requery the master.  we
1499          * know that the new_master got as far as creating
1500          * an mle on at least one node, but we do not know
1501          * if any nodes had actually cleared the mle and set
1502          * the master to the new_master.  the old master
1503          * is supposed to set the owner to UNKNOWN in the
1504          * event of a new_master death, so the only possible
1505          * responses that we can get from nodes here are
1506          * that the master is new_master, or that the master
1507          * is UNKNOWN.
1508          * if all nodes come back with UNKNOWN then we know
1509          * the lock needs remastering here.
1510          * if any node comes back with a valid master, check
1511          * to see if that master is the one that we are
1512          * recovering.  if so, then the new_master died and
1513          * we need to remaster this lock.  if not, then the
1514          * new_master survived and that node will respond to
1515          * other nodes about the owner.
1516          * if there is an owner, this node needs to dump this
1517          * lockres and alert the sender that this lockres
1518          * was rejected. */
1519         spin_lock(&dlm->spinlock);
1520         dlm_node_iter_init(dlm->domain_map, &iter);
1521         spin_unlock(&dlm->spinlock);
1522
1523         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1524                 /* do not send to self */
1525                 if (nodenum == dlm->node_num)
1526                         continue;
1527                 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1528                 if (ret < 0) {
1529                         mlog_errno(ret);
1530                         if (!dlm_is_host_down(ret))
1531                                 BUG();
1532                         /* host is down, so answer for that node would be
1533                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
1534                 }
1535                 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1536                         mlog(0, "lock master is %u\n", *real_master);
1537                         break;
1538                 }
1539         }
1540         return ret;
1541 }
1542
1543
1544 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1545                           u8 nodenum, u8 *real_master)
1546 {
1547         int ret = -EINVAL;
1548         struct dlm_master_requery req;
1549         int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1550
1551         memset(&req, 0, sizeof(req));
1552         req.node_idx = dlm->node_num;
1553         req.namelen = res->lockname.len;
1554         memcpy(req.name, res->lockname.name, res->lockname.len);
1555
1556         ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1557                                  &req, sizeof(req), nodenum, &status);
1558         /* XXX: negative status not handled properly here. */
1559         if (ret < 0)
1560                 mlog_errno(ret);
1561         else {
1562                 BUG_ON(status < 0);
1563                 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1564                 *real_master = (u8) (status & 0xff);
1565                 mlog(0, "node %u responded to master requery with %u\n",
1566                           nodenum, *real_master);
1567                 ret = 0;
1568         }
1569         return ret;
1570 }
1571
1572
1573 /* this function cannot error, so unless the sending
1574  * or receiving of the message failed, the owner can
1575  * be trusted */
1576 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1577 {
1578         struct dlm_ctxt *dlm = data;
1579         struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1580         struct dlm_lock_resource *res = NULL;
1581         unsigned int hash;
1582         int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1583         u32 flags = DLM_ASSERT_MASTER_REQUERY;
1584
1585         if (!dlm_grab(dlm)) {
1586                 /* since the domain has gone away on this
1587                  * node, the proper response is UNKNOWN */
1588                 return master;
1589         }
1590
1591         hash = dlm_lockid_hash(req->name, req->namelen);
1592
1593         spin_lock(&dlm->spinlock);
1594         res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1595         if (res) {
1596                 spin_lock(&res->spinlock);
1597                 master = res->owner;
1598                 if (master == dlm->node_num) {
1599                         int ret = dlm_dispatch_assert_master(dlm, res,
1600                                                              0, 0, flags);
1601                         if (ret < 0) {
1602                                 mlog_errno(ret);
1603                                 /* retry!? */
1604                                 BUG();
1605                         }
1606                 }
1607                 spin_unlock(&res->spinlock);
1608         }
1609         spin_unlock(&dlm->spinlock);
1610
1611         dlm_put(dlm);
1612         return master;
1613 }
1614
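/* Maps a wire 'list' index from dlm_migratable_lock (0 = granted,
 * 1 = converting, 2 = blocked) onto the matching list_head by plain
 * pointer arithmetic on &res->granted.  This relies on granted,
 * converting and blocked being declared consecutively in
 * struct dlm_lock_resource; the BUG_ONs only range-check the index. */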
1615 static inline struct list_head *
1616 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1617 {
1618         struct list_head *ret;
1619         BUG_ON(list_num < 0);
1620         BUG_ON(list_num > 2);
1621         ret = &(res->granted);
1622         ret += list_num;
1623         return ret;
1624 }
1625 /* TODO: do ast flush business
1626  * TODO: do MIGRATING and RECOVERING spinning
1627  */
1628
1629 /*
1630 * NOTE about in-flight requests during migration:
1631 *
1632 * Before attempting the migrate, the master has marked the lockres as
1633 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
1634 * requests either got queued before the MIGRATING flag got set, in which
1635 * case the lock data will reflect the change and a return message is on
1636 * the way, or the request failed to get in before MIGRATING got set.  In
1637 * this case, the caller will be told to spin and wait for the MIGRATING
1638 * flag to be dropped, then recheck the master.
1639 * This holds true for the convert, cancel and unlock cases, and since lvb
1640 * updates are tied to these same messages, it applies to lvb updates as
1641 * well.  For the lock case, there is no way a lock can be on the master
1642 * queue and not be on the secondary queue since the lock is always added
1643 * locally first.  This means that the new target node will never be sent
1644 * a lock that he doesn't already have on the list.
1645 * In total, this means that the local lock is correct and should not be
1646 * updated to match the one sent by the master.  Any messages sent back
1647 * from the master before the MIGRATING flag will bring the lock properly
1648 * up-to-date, and the change will be ordered properly for the waiter.
1649 * We will *not* attempt to modify the lock underneath the waiter.
1650 */
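/*
 * A rough sketch of the "spin and wait" described in the note above,
 * assuming only what is visible in this file (res->state, res->wq and
 * the DLM_LOCK_RES_MIGRATING flag); the real waiting helpers live with
 * the lock/convert/unlock paths, not here.
 */
#if 0	/* illustration only, never built */
static void sketch_wait_for_migration(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	while (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		/* res->wq is woken when the lockres changes state */
		wait_event(res->wq,
			   !(res->state & DLM_LOCK_RES_MIGRATING));
		spin_lock(&res->spinlock);
	}
	/* MIGRATING dropped: recheck res->owner before retrying */
	spin_unlock(&res->spinlock);
}
#endif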
1651
1652 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1653                                      struct dlm_lock_resource *res,
1654                                      struct dlm_migratable_lockres *mres)
1655 {
1656         struct dlm_migratable_lock *ml;
1657         struct list_head *queue;
1658         struct dlm_lock *newlock = NULL;
1659         struct dlm_lockstatus *lksb = NULL;
1660         int ret = 0;
1661         int i, bad;
1662         struct list_head *iter;
1663         struct dlm_lock *lock = NULL;
1664
1665         mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1666         for (i=0; i<mres->num_locks; i++) {
1667                 ml = &(mres->ml[i]);
1668                 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1669                 newlock = NULL;
1670                 lksb = NULL;
1671
1672                 queue = dlm_list_num_to_pointer(res, ml->list);
1673
1674                 /* if the lock is for the local node it needs to
1675                  * be moved to the proper location within the queue.
1676                  * do not allocate a new lock structure. */
1677                 if (ml->node == dlm->node_num) {
1678                         /* MIGRATION ONLY! */
1679                         BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1680
1681                         spin_lock(&res->spinlock);
1682                         list_for_each(iter, queue) {
1683                                 lock = list_entry (iter, struct dlm_lock, list);
1684                                 if (lock->ml.cookie != ml->cookie)
1685                                         lock = NULL;
1686                                 else
1687                                         break;
1688                         }
1689
1690                         /* lock is always created locally first, and
1691                          * destroyed locally last.  it must be on the list */
1692                         if (!lock) {
1693                                 u64 c = ml->cookie;
1694                                 mlog(ML_ERROR, "could not find local lock "
1695                                                "with cookie %u:%llu!\n",
1696                                                dlm_get_lock_cookie_node(c),
1697                                                dlm_get_lock_cookie_seq(c));
1698                                 BUG();
1699                         }
1700                         BUG_ON(lock->ml.node != ml->node);
1701
1702                         /* see NOTE above about why we do not update
1703                          * to match the master here */
1704
1705                         /* move the lock to its proper place */
1706                         /* do not alter lock refcount.  switching lists. */
1707                         list_move_tail(&lock->list, queue);
1708                         spin_unlock(&res->spinlock);
1709
1710                         mlog(0, "just reordered a local lock!\n");
1711                         continue;
1712                 }
1713
1714                 /* lock is for another node. */
1715                 newlock = dlm_new_lock(ml->type, ml->node,
1716                                        be64_to_cpu(ml->cookie), NULL);
1717                 if (!newlock) {
1718                         ret = -ENOMEM;
1719                         goto leave;
1720                 }
1721                 lksb = newlock->lksb;
1722                 dlm_lock_attach_lockres(newlock, res);
1723
1724                 if (ml->convert_type != LKM_IVMODE) {
1725                         BUG_ON(queue != &res->converting);
1726                         newlock->ml.convert_type = ml->convert_type;
1727                 }
1728                 lksb->flags |= (ml->flags &
1729                                 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1730
1731                 if (ml->type == LKM_NLMODE)
1732                         goto skip_lvb;
1733
1734                 if (!dlm_lvb_is_empty(mres->lvb)) {
1735                         if (lksb->flags & DLM_LKSB_PUT_LVB) {
1736                                 /* other node was trying to update
1737                                  * lvb when node died.  recreate the
1738                                  * lksb with the updated lvb. */
1739                                 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1740                                 /* the lock resource lvb update must happen
1741                                  * NOW, before the spinlock is dropped.
1742                                  * we no longer wait for the AST to update
1743                                  * the lvb. */
1744                                 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1745                         } else {
1746                                 /* otherwise, the node is sending its 
1747                                  * most recent valid lvb info */
1748                                 BUG_ON(ml->type != LKM_EXMODE &&
1749                                        ml->type != LKM_PRMODE);
1750                                 if (!dlm_lvb_is_empty(res->lvb) &&
1751                                     (ml->type == LKM_EXMODE ||
1752                                      memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1753                                         int i;
1754                                         mlog(ML_ERROR, "%s:%.*s: received bad "
1755                                              "lvb! type=%d\n", dlm->name,
1756                                              res->lockname.len,
1757                                              res->lockname.name, ml->type);
1758                                         printk("lockres lvb=[");
1759                                         for (i=0; i<DLM_LVB_LEN; i++)
1760                                                 printk("%02x", res->lvb[i]);
1761                                         printk("]\nmigrated lvb=[");
1762                                         for (i=0; i<DLM_LVB_LEN; i++)
1763                                                 printk("%02x", mres->lvb[i]);
1764                                         printk("]\n");
1765                                         dlm_print_one_lock_resource(res);
1766                                         BUG();
1767                                 }
1768                                 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1769                         }
1770                 }
1771 skip_lvb:
1772
1773                 /* NOTE:
1774                  * wrt lock queue ordering and recovery:
1775                  *    1. order of locks on granted queue is
1776                  *       meaningless.
1777                  *    2. order of locks on converting queue is
1778                  *       LOST with the node death.  sorry charlie.
1779                  *    3. order of locks on the blocked queue is
1780                  *       also LOST.
1781                  * order of locks does not affect integrity, it
1782                  * just means that a lock request may get pushed
1783                  * back in line as a result of the node death.
1784                  * also note that for a given node the lock order
1785                  * for its secondary queue locks is preserved
1786                  * relative to each other, but clearly *not*
1787                  * preserved relative to locks from other nodes.
1788                  */
1789                 bad = 0;
1790                 spin_lock(&res->spinlock);
1791                 list_for_each_entry(lock, queue, list) {
1792                         if (lock->ml.cookie == ml->cookie) {
1793                                 u64 c = lock->ml.cookie;
1794                                 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1795                                      "exists on this lockres!\n", dlm->name,
1796                                      res->lockname.len, res->lockname.name,
1797                                      dlm_get_lock_cookie_node(c),
1798                                      dlm_get_lock_cookie_seq(c));
1799
1800                                 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1801                                      "node=%u, cookie=%u:%llu, queue=%d\n",
1802                                      ml->type, ml->convert_type, ml->node,
1803                                      dlm_get_lock_cookie_node(ml->cookie),
1804                                      dlm_get_lock_cookie_seq(ml->cookie),
1805                                      ml->list);
1806
1807                                 __dlm_print_one_lock_resource(res);
1808                                 bad = 1;
1809                                 break;
1810                         }
1811                 }
1812                 if (!bad) {
1813                         dlm_lock_get(newlock);
1814                         list_add_tail(&newlock->list, queue);
1815                 }
1816                 spin_unlock(&res->spinlock);
1817         }
1818         mlog(0, "done running all the locks\n");
1819
1820 leave:
1821         if (ret < 0) {
1822                 mlog_errno(ret);
1823                 if (newlock)
1824                         dlm_lock_put(newlock);
1825         }
1826
1827         mlog_exit(ret);
1828         return ret;
1829 }
1830
1831 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1832                                        struct dlm_lock_resource *res)
1833 {
1834         int i;
1835         struct list_head *queue, *iter, *iter2;
1836         struct dlm_lock *lock;
1837
1838         res->state |= DLM_LOCK_RES_RECOVERING;
1839         if (!list_empty(&res->recovering)) {
1840                 mlog(0,
1841                      "Recovering res %s:%.*s, is already on recovery list!\n",
1842                      dlm->name, res->lockname.len, res->lockname.name);
1843                 list_del_init(&res->recovering);
1844         }
1845         /* We need to hold a reference while on the recovery list */
1846         dlm_lockres_get(res);
1847         list_add_tail(&res->recovering, &dlm->reco.resources);
1848
1849         /* find any pending locks and put them back on proper list */
1850         for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
1851                 queue = dlm_list_idx_to_ptr(res, i);
1852                 list_for_each_safe(iter, iter2, queue) {
1853                         lock = list_entry (iter, struct dlm_lock, list);
1854                         dlm_lock_get(lock);
1855                         if (lock->convert_pending) {
1856                                 /* move converting lock back to granted */
1857                                 BUG_ON(i != DLM_CONVERTING_LIST);
1858                                 mlog(0, "node died with convert pending "
1859                                      "on %.*s. move back to granted list.\n",
1860                                      res->lockname.len, res->lockname.name);
1861                                 dlm_revert_pending_convert(res, lock);
1862                                 lock->convert_pending = 0;
1863                         } else if (lock->lock_pending) {
1864                                 /* remove pending lock requests completely */
1865                                 BUG_ON(i != DLM_BLOCKED_LIST);
1866                                 mlog(0, "node died with lock pending "
1867                                      "on %.*s. remove from blocked list and skip.\n",
1868                                      res->lockname.len, res->lockname.name);
1869                                 /* lock will be floating until ref in
1870                                  * dlmlock_remote is freed after the network
1871                                  * call returns.  ok for it to not be on any
1872                                  * list since no ast can be called
1873                                  * (the master is dead). */
1874                                 dlm_revert_pending_lock(res, lock);
1875                                 lock->lock_pending = 0;
1876                         } else if (lock->unlock_pending) {
1877                                 /* if an unlock was in progress, treat as
1878                                  * if this had completed successfully
1879                                  * before sending this lock state to the
1880                                  * new master.  note that the dlm_unlock
1881                                  * call is still responsible for calling
1882                                  * the unlockast.  that will happen after
1883                                  * the network call times out.  for now,
1884                                  * just move lists to prepare the new
1885                                  * recovery master.  */
1886                                 BUG_ON(i != DLM_GRANTED_LIST);
1887                                 mlog(0, "node died with unlock pending "
1888                                      "on %.*s. remove from blocked list and skip.\n",
1888                                      "on %.*s. treating the unlock as completed.\n",
1890                                 dlm_commit_pending_unlock(res, lock);
1891                                 lock->unlock_pending = 0;
1892                         } else if (lock->cancel_pending) {
1893                                 /* if a cancel was in progress, treat as
1894                                  * if this had completed successfully
1895                                  * before sending this lock state to the
1896                                  * new master */
1897                                 BUG_ON(i != DLM_CONVERTING_LIST);
1898                                 mlog(0, "node died with cancel pending "
1899                                      "on %.*s. move back to granted list.\n",
1900                                      res->lockname.len, res->lockname.name);
1901                                 dlm_commit_pending_cancel(res, lock);
1902                                 lock->cancel_pending = 0;
1903                         }
1904                         dlm_lock_put(lock);
1905                 }
1906         }
1907 }
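/*
 * Summary of the pending-state handling above, one case per operation
 * that was still in flight to the dead master:
 *
 *   convert_pending - the convert is reverted and the lock returns to
 *                     the granted list.
 *   lock_pending    - the original lock request is reverted; the lock
 *                     is removed from the blocked list and is freed
 *                     once dlmlock_remote drops its reference.
 *   unlock_pending  - treated as if the unlock completed; dlm_unlock
 *                     still calls the unlockast after its network call
 *                     times out.
 *   cancel_pending  - treated as if the cancel completed; the lock
 *                     moves from converting back to granted.
 */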
1908
1909
1910
1911 /* removes all recovered locks from the recovery list.
1912  * sets the res->owner to the new master.
1913  * unsets the RECOVERY flag and wakes waiters. */
1914 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1915                                               u8 dead_node, u8 new_master)
1916 {
1917         int i;
1918         struct list_head *iter, *iter2;
1919         struct hlist_node *hash_iter;
1920         struct hlist_head *bucket;
1921
1922         struct dlm_lock_resource *res;
1923
1924         mlog_entry_void();
1925
1926         assert_spin_locked(&dlm->spinlock);
1927
1928         list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1929                 res = list_entry (iter, struct dlm_lock_resource, recovering);
1930                 if (res->owner == dead_node) {
1931                         list_del_init(&res->recovering);
1932                         spin_lock(&res->spinlock);
1933                         dlm_change_lockres_owner(dlm, res, new_master);
1934                         res->state &= ~DLM_LOCK_RES_RECOVERING;
1935                         if (!__dlm_lockres_unused(res))
1936                                 __dlm_dirty_lockres(dlm, res);
1937                         spin_unlock(&res->spinlock);
1938                         wake_up(&res->wq);
1939                         dlm_lockres_put(res);
1940                 }
1941         }
1942
1943         /* this will become unnecessary eventually, but
1944          * for now we need to run the whole hash, clear
1945          * the RECOVERING state and set the owner
1946          * if necessary */
1947         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1948                 bucket = dlm_lockres_hash(dlm, i);
1949                 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
1950                         if (res->state & DLM_LOCK_RES_RECOVERING) {
1951                                 if (res->owner == dead_node) {
1952                                         mlog(0, "(this=%u) res %.*s (new master %u) "
1953                                              "was not on recovering list, but "
1954                                              "clearing state anyway\n",
1955                                              dlm->node_num, res->lockname.len,
1956                                              res->lockname.name, new_master);
1957                                 } else if (res->owner == dlm->node_num) {
1958                                         mlog(0, "(this=%u) res %.*s (new master %u) "
1959                                              "was not on recovering list, "
1960                                              "owner is THIS node, clearing\n",
1961                                              dlm->node_num, res->lockname.len,
1962                                              res->lockname.name, new_master);
1963                                 } else
1964                                         continue;
1965
1966                                 if (!list_empty(&res->recovering)) {
1967                                         mlog(0, "%s:%.*s: lockres was "
1968                                              "marked RECOVERING, owner=%u\n",
1969                                              dlm->name, res->lockname.len,
1970                                              res->lockname.name, res->owner);
1971                                         list_del_init(&res->recovering);
1972                                         dlm_lockres_put(res);
1973                                 }
1974                                 spin_lock(&res->spinlock);
1975                                 dlm_change_lockres_owner(dlm, res, new_master);
1976                                 res->state &= ~DLM_LOCK_RES_RECOVERING;
1977                                 if (!__dlm_lockres_unused(res))
1978                                         __dlm_dirty_lockres(dlm, res);
1979                                 spin_unlock(&res->spinlock);
1980                                 wake_up(&res->wq);
1981                         }
1982                 }
1983         }
1984 }
1985
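/* Decide whether a given lock means the lvb can no longer be trusted.
 * 'local' selects which rule applies:
 *   local != 0: scanning our own locks on a lockres we do not master;
 *               if we hold neither EX nor PR we have no authoritative
 *               copy of the lvb, so invalidate it.
 *   local == 0: we are the master scanning the dead node's locks; if
 *               the dead node held EX it may have changed the lvb
 *               without telling us, so invalidate it. */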
1986 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
1987 {
1988         if (local) {
1989                 if (lock->ml.type != LKM_EXMODE &&
1990                     lock->ml.type != LKM_PRMODE)
1991                         return 1;
1992         } else if (lock->ml.type == LKM_EXMODE)
1993                 return 1;
1994         return 0;
1995 }
1996
1997 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
1998                                struct dlm_lock_resource *res, u8 dead_node)
1999 {
2000         struct list_head *iter, *queue;
2001         struct dlm_lock *lock;
2002         int blank_lvb = 0, local = 0;
2003         int i;
2004         u8 search_node;
2005
2006         assert_spin_locked(&dlm->spinlock);
2007         assert_spin_locked(&res->spinlock);
2008
2009         if (res->owner == dlm->node_num)
2010                 /* if this node owned the lockres, and if the dead node 
2011                  * had an EX when he died, blank out the lvb */
2012                 search_node = dead_node;
2013         else {
2014                 /* if this is a secondary lockres, and we had no EX or PR
2015                  * locks granted, we can no longer trust the lvb */
2016                 search_node = dlm->node_num;
2017                 local = 1;  /* check local state for valid lvb */
2018         }
2019
2020         for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2021                 queue = dlm_list_idx_to_ptr(res, i);
2022                 list_for_each(iter, queue) {
2023                         lock = list_entry (iter, struct dlm_lock, list);
2024                         if (lock->ml.node == search_node) {
2025                                 if (dlm_lvb_needs_invalidation(lock, local)) {
2026                                         /* zero the lksb lvb and lockres lvb */
2027                                         blank_lvb = 1;
2028                                         memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2029                                 }
2030                         }
2031                 }
2032         }
2033
2034         if (blank_lvb) {
2035                 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2036                      res->lockname.len, res->lockname.name, dead_node);
2037                 memset(res->lvb, 0, DLM_LVB_LEN);
2038         }
2039 }
2040
2041 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2042                                 struct dlm_lock_resource *res, u8 dead_node)
2043 {
2044         struct list_head *iter, *tmpiter;
2045         struct dlm_lock *lock;
2046
2047         /* this node is the lockres master:
2048          * 1) remove any stale locks for the dead node
2049          * 2) if the dead node had an EX when he died, blank out the lvb 
2050          */
2051         assert_spin_locked(&dlm->spinlock);
2052         assert_spin_locked(&res->spinlock);
2053
2054         /* TODO: check pending_asts, pending_basts here */
2055         list_for_each_safe(iter, tmpiter, &res->granted) {
2056                 lock = list_entry (iter, struct dlm_lock, list);
2057                 if (lock->ml.node == dead_node) {
2058                         list_del_init(&lock->list);
2059                         dlm_lock_put(lock);
2060                 }
2061         }
2062         list_for_each_safe(iter, tmpiter, &res->converting) {
2063                 lock = list_entry (iter, struct dlm_lock, list);
2064                 if (lock->ml.node == dead_node) {
2065                         list_del_init(&lock->list);
2066                         dlm_lock_put(lock);
2067                 }
2068         }
2069         list_for_each_safe(iter, tmpiter, &res->blocked) {
2070                 lock = list_entry (iter, struct dlm_lock, list);
2071                 if (lock->ml.node == dead_node) {
2072                         list_del_init(&lock->list);
2073                         dlm_lock_put(lock);
2074                 }
2075         }
2076
2077         /* do not kick thread yet */
2078         __dlm_dirty_lockres(dlm, res);
2079 }
2080
2081 /* if this node is the recovery master, and there are no
2082  * locks for a given lockres owned by this node that are in
2083  * either PR or EX mode, zero out the lvb before requesting.
2084  *
2085  */
2086
2087
2088 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2089 {
2090         struct hlist_node *iter;
2091         struct dlm_lock_resource *res;
2092         int i;
2093         struct hlist_head *bucket;
2094         struct dlm_lock *lock;
2095
2096
2097         /* purge any stale mles */
2098         dlm_clean_master_list(dlm, dead_node);
2099
2100         /*
2101          * now clean up all lock resources.  there are two rules:
2102          *
2103          * 1) if the dead node was the master, move the lockres
2104          *    to the recovering list.  set the RECOVERING flag.
2105          *    this lockres needs to be cleaned up before it can
2106          *    be used further.
2107          *
2108          * 2) if this node was the master, remove all locks from
2109          *    each of the lockres queues that were owned by the
2110          *    dead node.  once recovery finishes, the dlm thread
2111          *    can be kicked again to see if any ASTs or BASTs
2112          *    need to be fired as a result.
2113          */
2114         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2115                 bucket = dlm_lockres_hash(dlm, i);
2116                 hlist_for_each_entry(res, iter, bucket, hash_node) {
2117                         /* always prune any $RECOVERY entries for dead nodes,
2118                          * otherwise hangs can occur during later recovery */
2119                         if (dlm_is_recovery_lock(res->lockname.name,
2120                                                  res->lockname.len)) {
2121                                 spin_lock(&res->spinlock);
2122                                 list_for_each_entry(lock, &res->granted, list) {
2123                                         if (lock->ml.node == dead_node) {
2124                                                 mlog(0, "AHA! there was "
2125                                                      "a $RECOVERY lock for dead "
2126                                                      "node %u (%s)!\n",
2127                                                      dead_node, dlm->name);
2128                                                 list_del_init(&lock->list);
2129                                                 dlm_lock_put(lock);
2130                                                 break;
2131                                         }
2132                                 }
2133                                 spin_unlock(&res->spinlock);
2134                                 continue;
2135                         }
2136                         spin_lock(&res->spinlock);
2137                         /* zero the lvb if necessary */
2138                         dlm_revalidate_lvb(dlm, res, dead_node);
2139                         if (res->owner == dead_node)
2140                                 dlm_move_lockres_to_recovery_list(dlm, res);
2141                         else if (res->owner == dlm->node_num) {
2142                                 dlm_free_dead_locks(dlm, res, dead_node);
2143                                 __dlm_lockres_calc_usage(dlm, res);
2144                         }
2145                         spin_unlock(&res->spinlock);
2146                 }
2147         }
2148
2149 }
2150
2151 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2152 {
2153         assert_spin_locked(&dlm->spinlock);
2154
2155         if (dlm->reco.new_master == idx) {
2156                 mlog(0, "%s: recovery master %d just died\n",
2157                      dlm->name, idx);
2158                 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2159                         /* finalize1 was reached, so it is safe to clear
2160                          * the new_master and dead_node.  that recovery
2161                          * is complete. */
2162                         mlog(0, "%s: dead master %d had reached "
2163                              "finalize1 state, clearing\n", dlm->name, idx);
2164                         dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2165                         __dlm_reset_recovery(dlm);
2166                 }
2167         }
2168
2169         /* check to see if the node is already considered dead */
2170         if (!test_bit(idx, dlm->live_nodes_map)) {
2171                 mlog(0, "for domain %s, node %d is already dead. "
2172                      "another node likely did recovery already.\n",
2173                      dlm->name, idx);
2174                 return;
2175         }
2176
2177         /* check to see if we do not care about this node */
2178         if (!test_bit(idx, dlm->domain_map)) {
2179                 /* This also catches the case that we get a node down
2180                  * but haven't joined the domain yet. */
2181                 mlog(0, "node %u already removed from domain!\n", idx);
2182                 return;
2183         }
2184
2185         clear_bit(idx, dlm->live_nodes_map);
2186
2187         /* Clean up join state on node death. */
2188         if (dlm->joining_node == idx) {
2189                 mlog(0, "Clearing join state for node %u\n", idx);
2190                 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2191         }
2192
2193         /* make sure local cleanup occurs before the heartbeat events */
2194         if (!test_bit(idx, dlm->recovery_map))
2195                 dlm_do_local_recovery_cleanup(dlm, idx);
2196
2197         /* notify anything attached to the heartbeat events */
2198         dlm_hb_event_notify_attached(dlm, idx, 0);
2199
2200         mlog(0, "node %u being removed from domain map!\n", idx);
2201         clear_bit(idx, dlm->domain_map);
2202         /* wake up migration waiters if a node goes down.
2203          * perhaps later we can genericize this for other waiters. */
2204         wake_up(&dlm->migration_wq);
2205
2206         if (test_bit(idx, dlm->recovery_map))
2207                 mlog(0, "domain %s, node %u already added "
2208                      "to recovery map!\n", dlm->name, idx);
2209         else
2210                 set_bit(idx, dlm->recovery_map);
2211 }
2212
2213 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2214 {
2215         struct dlm_ctxt *dlm = data;
2216
2217         if (!dlm_grab(dlm))
2218                 return;
2219
2220         spin_lock(&dlm->spinlock);
2221         __dlm_hb_node_down(dlm, idx);
2222         spin_unlock(&dlm->spinlock);
2223
2224         dlm_put(dlm);
2225 }
2226
2227 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2228 {
2229         struct dlm_ctxt *dlm = data;
2230
2231         if (!dlm_grab(dlm))
2232                 return;
2233
2234         spin_lock(&dlm->spinlock);
2235         set_bit(idx, dlm->live_nodes_map);
2236         /* do NOT notify mle attached to the heartbeat events.
2237          * new nodes are of no interest to mastery until they have joined. */
2238         spin_unlock(&dlm->spinlock);
2239
2240         dlm_put(dlm);
2241 }
2242
2243 static void dlm_reco_ast(void *astdata)
2244 {
2245         struct dlm_ctxt *dlm = astdata;
2246         mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2247              dlm->node_num, dlm->name);
2248 }
2249 static void dlm_reco_bast(void *astdata, int blocked_type)
2250 {
2251         struct dlm_ctxt *dlm = astdata;
2252         mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2253              dlm->node_num, dlm->name);
2254 }
2255 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2256 {
2257         mlog(0, "unlockast for recovery lock fired!\n");
2258 }
2259
2260 /*
2261  * dlm_pick_recovery_master will continually attempt to use
2262  * dlmlock() on the special "$RECOVERY" lockres with the
2263  * LKM_NOQUEUE flag to get an EX.  every thread that enters
2264  * this function on each node racing to become the recovery
2265  * master will not stop attempting this until either:
2266  * a) this node gets the EX (and becomes the recovery master),
2267  * or b) dlm->reco.new_master gets set to some nodenum 
2268  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2269  * so each time a recovery master is needed, the entire cluster
2270  * will sync at this point.  if the new master dies, that will
2271  * be detected in dlm_do_recovery */
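/*
 * Outcome summary for the dlmlock($RECOVERY) race below:
 *
 *   DLM_NORMAL     - this node won the EX.  Unless another node already
 *                    became reco master, or the dead node was recovered
 *                    in the meantime, send begin_reco and set itself as
 *                    new_master.  The lock is then dropped again.
 *   DLM_NOTQUEUED  - another node holds the EX; wait up to a second for
 *                    reco.new_master to be announced, otherwise retry.
 *   DLM_RECOVERING - the lock master died during the call; retry.
 *   anything else  - unexpected; dump the $RECOVERY lockres and BUG().
 */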
2272 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2273 {
2274         enum dlm_status ret;
2275         struct dlm_lockstatus lksb;
2276         int status = -EINVAL;
2277
2278         mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2279              dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2280 again:  
2281         memset(&lksb, 0, sizeof(lksb));
2282
2283         ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2284                       DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
2285
2286         mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2287              dlm->name, ret, lksb.status);
2288
2289         if (ret == DLM_NORMAL) {
2290                 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2291                      dlm->name, dlm->node_num);
2292                 
2293                 /* got the EX lock.  check to see if another node 
2294                  * just became the reco master */
2295                 if (dlm_reco_master_ready(dlm)) {
2296                         mlog(0, "%s: got reco EX lock, but %u will "
2297                              "do the recovery\n", dlm->name,
2298                              dlm->reco.new_master);
2299                         status = -EEXIST;
2300                 } else {
2301                         status = 0;
2302
2303                         /* see if recovery was already finished elsewhere */
2304                         spin_lock(&dlm->spinlock);
2305                         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2306                                 status = -EINVAL;       
2307                                 mlog(0, "%s: got reco EX lock, but "
2308                                      "node got recovered already\n", dlm->name);
2309                                 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2310                                         mlog(ML_ERROR, "%s: new master is %u "
2311                                              "but no dead node!\n", 
2312                                              dlm->name, dlm->reco.new_master);
2313                                         BUG();
2314                                 }
2315                         }
2316                         spin_unlock(&dlm->spinlock);
2317                 }
2318
2319                 /* if this node has actually become the recovery master,
2320                  * set the master and send the messages to begin recovery */
2321                 if (!status) {
2322                         mlog(0, "%s: dead=%u, this=%u, sending "
2323                              "begin_reco now\n", dlm->name, 
2324                              dlm->reco.dead_node, dlm->node_num);
2325                         status = dlm_send_begin_reco_message(dlm,
2326                                       dlm->reco.dead_node);
2327                         /* this always succeeds */
2328                         BUG_ON(status);
2329
2330                         /* set the new_master to this node */
2331                         spin_lock(&dlm->spinlock);
2332                         dlm_set_reco_master(dlm, dlm->node_num);
2333                         spin_unlock(&dlm->spinlock);
2334                 }
2335
2336                 /* recovery lock is a special case.  ast will not get fired,
2337                  * so just go ahead and unlock it. */
2338                 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2339                 if (ret == DLM_DENIED) {
2340                         mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2341                         ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2342                 }
2343                 if (ret != DLM_NORMAL) {
2344                         /* this would really suck. this could only happen
2345                          * if there was a network error during the unlock
2346                          * because of node death.  this means the unlock
2347                          * is actually "done" and the lock structure is
2348                          * even freed.  we can continue, but only
2349                          * because this specific lock name is special. */
2350                         mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2351                 }
2352         } else if (ret == DLM_NOTQUEUED) {
2353                 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2354                      dlm->name, dlm->node_num);
2355                 /* another node is master. wait on
2356                  * reco.new_master != O2NM_INVALID_NODE_NUM 
2357                  * for at most one second */
2358                 wait_event_timeout(dlm->dlm_reco_thread_wq,
2359                                          dlm_reco_master_ready(dlm),
2360                                          msecs_to_jiffies(1000));
2361                 if (!dlm_reco_master_ready(dlm)) {
2362                         mlog(0, "%s: reco master taking awhile\n",
2363                              dlm->name);
2364                         goto again;
2365                 }
2366                 /* another node has informed this one that it is reco master */
2367                 mlog(0, "%s: reco master %u is ready to recover %u\n",
2368                      dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2369                 status = -EEXIST;
2370         } else if (ret == DLM_RECOVERING) {
2371                 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2372                      dlm->name, dlm->node_num);
2373                 goto again;
2374         } else {
2375                 struct dlm_lock_resource *res;
2376
2377                 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2378                 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2379                      "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2380                      dlm_errname(lksb.status));
2381                 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2382                                          DLM_RECOVERY_LOCK_NAME_LEN);
2383                 if (res) {
2384                         dlm_print_one_lock_resource(res);
2385                         dlm_lockres_put(res);
2386                 } else {
2387                         mlog(ML_ERROR, "recovery lock not found\n");
2388                 }
2389                 BUG();
2390         }
2391
2392         return status;
2393 }
2394
2395 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2396 {
2397         struct dlm_begin_reco br;
2398         int ret = 0;
2399         struct dlm_node_iter iter;
2400         int nodenum;
2401         int status;
2402
2403         mlog_entry("%u\n", dead_node);
2404
2405         mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2406
2407         spin_lock(&dlm->spinlock);
2408         dlm_node_iter_init(dlm->domain_map, &iter);
2409         spin_unlock(&dlm->spinlock);
2410
2411         clear_bit(dead_node, iter.node_map);
2412
2413         memset(&br, 0, sizeof(br));
2414         br.node_idx = dlm->node_num;
2415         br.dead_node = dead_node;
2416
2417         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2418                 ret = 0;
2419                 if (nodenum == dead_node) {
2420                         mlog(0, "not sending begin reco to dead node "
2421                                   "%u\n", dead_node);
2422                         continue;
2423                 }
2424                 if (nodenum == dlm->node_num) {
2425                         mlog(0, "not sending begin reco to self\n");
2426                         continue;
2427                 }
2428 retry:
2429                 ret = -EINVAL;
2430                 mlog(0, "attempting to send begin reco msg to %d\n",
2431                           nodenum);
2432                 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2433                                          &br, sizeof(br), nodenum, &status);
2434                 /* negative status is handled ok by caller here */
2435                 if (ret >= 0)
2436                         ret = status;
2437                 if (dlm_is_host_down(ret)) {
2438                         /* node is down.  not involved in recovery
2439                          * so just keep going */
2440                         mlog(0, "%s: node %u was down when sending "
2441                              "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2442                         ret = 0;
2443                 }
2444                 if (ret < 0) {
2445                         struct dlm_lock_resource *res;
2446                         /* this is now a serious problem, possibly ENOMEM 
2447                          * in the network stack.  must retry */
2448                         mlog_errno(ret);
2449                         mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2450                             "returned %d\n", dlm->name, nodenum, ret);
2451                         res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2452                                                  DLM_RECOVERY_LOCK_NAME_LEN);
2453                         if (res) {
2454                                 dlm_print_one_lock_resource(res);
2455                                 dlm_lockres_put(res);
2456                         } else {
2457                                 mlog(ML_ERROR, "recovery lock not found\n");
2458                         }
2459                         /* sleep for a bit in hopes that we can avoid 
2460                          * another ENOMEM */
2461                         msleep(100);
2462                         goto retry;
2463                 } else if (ret == EAGAIN) {
2464                         mlog(0, "%s: trying to start recovery of node "
2465                              "%u, but node %u is waiting for last recovery "
2466                              "to complete, backoff for a bit\n", dlm->name,
2467                              dead_node, nodenum);
2468                         /* TODO Look into replacing msleep with cond_resched() */
2469                         msleep(100);
2470                         goto retry;
2471                 }
2472         }
2473
2474         return ret;
2475 }
2476
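/* Handler for DLM_BEGIN_RECO_MSG.  Records the sender (br->node_idx) as
 * the new recovery master and br->dead_node as the node being recovered.
 * If this node is still in the FINALIZE stage of a previous recovery,
 * EAGAIN is returned so the sender backs off and retries.  If the dead
 * node has not yet been noticed locally, it is marked down here so the
 * usual cleanup runs, and the recovery thread is kicked. */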
2477 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2478 {
2479         struct dlm_ctxt *dlm = data;
2480         struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2481
2482         /* ok to return 0, domain has gone away */
2483         if (!dlm_grab(dlm))
2484                 return 0;
2485
2486         spin_lock(&dlm->spinlock);
2487         if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2488                 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2489                      "but this node is in finalize state, waiting on finalize2\n",
2490                      dlm->name, br->node_idx, br->dead_node,
2491                      dlm->reco.dead_node, dlm->reco.new_master);
2492                 spin_unlock(&dlm->spinlock);
2493                 return EAGAIN;
2494         }
2495         spin_unlock(&dlm->spinlock);
2496
2497         mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2498              dlm->name, br->node_idx, br->dead_node,
2499              dlm->reco.dead_node, dlm->reco.new_master);
2500
2501         dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2502
2503         spin_lock(&dlm->spinlock);
2504         if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2505                 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2506                         mlog(0, "%s: new_master %u died, changing "
2507                              "to %u\n", dlm->name, dlm->reco.new_master,
2508                              br->node_idx);
2509                 } else {
2510                         mlog(0, "%s: new_master %u NOT DEAD, changing "
2511                              "to %u\n", dlm->name, dlm->reco.new_master,
2512                              br->node_idx);
2513                         /* may not have seen the new master as dead yet */
2514                 }
2515         }
2516         if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2517                 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2518                      "node %u changing it to %u\n", dlm->name,
2519                      dlm->reco.dead_node, br->node_idx, br->dead_node);
2520         }
2521         dlm_set_reco_master(dlm, br->node_idx);
2522         dlm_set_reco_dead_node(dlm, br->dead_node);
2523         if (!test_bit(br->dead_node, dlm->recovery_map)) {
2524                 mlog(0, "recovery master %u sees %u as dead, but this "
2525                      "node has not yet seen it.  marking %u as dead\n",
2526                      br->node_idx, br->dead_node, br->dead_node);
2527                 if (!test_bit(br->dead_node, dlm->domain_map) ||
2528                     !test_bit(br->dead_node, dlm->live_nodes_map))
2529                         mlog(0, "%u not in domain/live_nodes map "
2530                              "so setting it in reco map manually\n",
2531                              br->dead_node);
2532                 /* force the recovery cleanup in __dlm_hb_node_down;
2533                  * both of these bits will be cleared in a moment */
2534                 set_bit(br->dead_node, dlm->domain_map);
2535                 set_bit(br->dead_node, dlm->live_nodes_map);
2536                 __dlm_hb_node_down(dlm, br->dead_node);
2537         }
2538         spin_unlock(&dlm->spinlock);
2539
2540         dlm_kick_recovery_thread(dlm);
2541
2542         mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2543              dlm->name, br->node_idx, br->dead_node,
2544              dlm->reco.dead_node, dlm->reco.new_master);
2545
2546         dlm_put(dlm);
2547         return 0;
2548 }
2549
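/* Recovery is finalized in two stages.  Stage one tells each node to
 * finish its local lock resource recovery for the dead node and to set
 * DLM_RECO_STATE_FINALIZE; stage two (marked with DLM_FINALIZE_STAGE2)
 * clears that state and lets each node reset its recovery info.  A node
 * that dies while the finalize messages are being sent has no effect on
 * this recovery session and is skipped. */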
2550 #define DLM_FINALIZE_STAGE2  0x01
2551 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2552 {
2553         int ret = 0;
2554         struct dlm_finalize_reco fr;
2555         struct dlm_node_iter iter;
2556         int nodenum;
2557         int status;
2558         int stage = 1;
2559
2560         mlog(0, "finishing recovery for node %s:%u, "
2561              "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2562
2563         spin_lock(&dlm->spinlock);
2564         dlm_node_iter_init(dlm->domain_map, &iter);
2565         spin_unlock(&dlm->spinlock);
2566
2567 stage2:
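        /* the message is rebuilt on each pass so that the second pass
         * can set the DLM_FINALIZE_STAGE2 flag */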
2568         memset(&fr, 0, sizeof(fr));
2569         fr.node_idx = dlm->node_num;
2570         fr.dead_node = dlm->reco.dead_node;
2571         if (stage == 2)
2572                 fr.flags |= DLM_FINALIZE_STAGE2;
2573
2574         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2575                 if (nodenum == dlm->node_num)
2576                         continue;
2577                 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2578                                          &fr, sizeof(fr), nodenum, &status);
2579                 if (ret >= 0)
2580                         ret = status;
2581                 if (ret < 0) {
2582                         mlog_errno(ret);
2583                         if (dlm_is_host_down(ret)) {
2584                                 /* this has no effect on this recovery
2585                                  * session, so clear the error and
2586                                  * finish out the last recovery */
2587                                 mlog(ML_ERROR, "node %u went down after this "
2588                                      "node finished recovery.\n", nodenum);
2589                                 ret = 0;
2590                                 continue;
2591                         }
2592                         break;
2593                 }
2594         }
2595         if (stage == 1) {
2596                 /* reset the node_iter back to the top and send finalize2 */
2597                 iter.curnode = -1;
2598                 stage = 2;
2599                 goto stage2;
2600         }
2601
2602         return ret;
2603 }
2604
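/* Handler for DLM_FINALIZE_RECO_MSG, sent only by the new recovery
 * master.  Stage one finishes the local lock resource recovery for the
 * dead node and sets DLM_RECO_STATE_FINALIZE; stage two clears that
 * state, resets the local recovery info and wakes the recovery thread.
 * A finalize message from an unexpected master, for an unexpected dead
 * node, or with the stages out of order is a fatal inconsistency. */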
2605 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2606 {
2607         struct dlm_ctxt *dlm = data;
2608         struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2609         int stage = 1;
2610
2611         /* ok to return 0, domain has gone away */
2612         if (!dlm_grab(dlm))
2613                 return 0;
2614
2615         if (fr->flags & DLM_FINALIZE_STAGE2)
2616                 stage = 2;
2617
2618         mlog(0, "%s: node %u finalizing recovery stage%d of "
2619              "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2620              fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2621
2622         spin_lock(&dlm->spinlock);
2623
2624         if (dlm->reco.new_master != fr->node_idx) {
2625                 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2626                      "%u is supposed to be the new master, dead=%u\n",
2627                      fr->node_idx, dlm->reco.new_master, fr->dead_node);
2628                 BUG();
2629         }
2630         if (dlm->reco.dead_node != fr->dead_node) {
2631                 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2632                      "node %u, but node %u is supposed to be dead\n",
2633                      fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2634                 BUG();
2635         }
2636
2637         switch (stage) {
2638                 case 1:
2639                         dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2640                         if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2641                                 mlog(ML_ERROR, "%s: received finalize1 from "
2642                                      "new master %u for dead node %u, but "
2643                                      "this node has already received it!\n",
2644                                      dlm->name, fr->node_idx, fr->dead_node);
2645                                 dlm_print_reco_node_status(dlm);
2646                                 BUG();
2647                         }
2648                         dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2649                         spin_unlock(&dlm->spinlock);
2650                         break;
2651                 case 2:
2652                         if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2653                                 mlog(ML_ERROR, "%s: received finalize2 from "
2654                                      "new master %u for dead node %u, but "
2655                                      "this node did not have finalize1!\n",
2656                                      dlm->name, fr->node_idx, fr->dead_node);
2657                                 dlm_print_reco_node_status(dlm);
2658                                 BUG();
2659                         }
2660                         dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2661                         spin_unlock(&dlm->spinlock);
2662                         dlm_reset_recovery(dlm);
2663                         dlm_kick_recovery_thread(dlm);
2664                         break;
2665                 default:
2666                         BUG();
2667         }
2668
2669         mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2670              dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2671
2672         dlm_put(dlm);
2673         return 0;
2674 }