/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
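/*
 * Illustration (added for clarity): a chain of pages A -> B -> C is
 * represented as
 *   A->private == (unsigned long)B,
 *   B->private == (unsigned long)C,
 *   C->private == 0,   the end-of-chain marker.
 * page_chain_next() follows ->private; set_page_private(page, 0)
 * terminates the tail when a sub-chain is split off, as in
 * page_chain_del() below.
 */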

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req, *tmp;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

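	/* Walk all volumes.  RCU protects the idr walk, but we must drop
	 * the rcu read lock around drbd_reclaim_net_peer_reqs(), which
	 * may sleep while freeing; a kref keeps the device alive across
	 * that window. */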
	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device: DRBD peer device.
 * @number:     number of pages requested
 * @retry:      whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

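		/* If we slept the full HZ/10 without being woken up by a
		 * page becoming available, stop treating max-buffers as a
		 * hard limit: insisting on it here could turn the soft
		 * throttle into the distributed deadlock described above. */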
		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

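	/* If the global pool already holds enough pages for the worst case
	 * (one maximum sized bio per minor), hand these pages back to the
	 * system; otherwise keep them in drbd_pp_pool for reuse. */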
	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (payload_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, &drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
		       int is_net)
{
	might_sleep();
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
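	/* EE_CALL_AL_COMPLETE_IO should normally have been consumed before
	 * we get here; expect() logs an assertion failure if it is still
	 * set, and we recover by releasing the activity log reference
	 * ourselves. */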
	if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}
	mempool_free(peer_req, &drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &device->net_ee;

	spin_lock_irq(&device->resource->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(device, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	wake_up(&device->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
	return sock_recvmsg(sock, &msg, msg.msg_flags);
}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv(connection, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(connection, buf, size);
	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

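/* Bundles what drbd_incoming_connection() needs: the listening socket's
 * original sk_state_change callback is saved here, and the door_bell
 * completion is rung once an incoming connection reaches
 * TCP_ESTABLISHED, waking drbd_wait_for_connect(). */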
struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);
};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(connection);
	struct packet_info pi;
	struct net_conf *nc;
	int err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
	rcu_read_unlock();

	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(connection, connection->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:       pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{
	struct net_conf *nc;
	int timeout;
	bool ok;

	if (!*sock1 || !*sock2)
		return false;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
	rcu_read_unlock();
	schedule_timeout_interruptible(timeout);

	ok = drbd_socket_okay(sock1);
	ok = drbd_socket_okay(sock2) && ok;

	return ok;
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
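/* Both nodes connect out and accept incoming connections in parallel.
 * The socket announced with P_INITIAL_DATA becomes the data socket,
 * the one announced with P_INITIAL_META the meta socket.  When initial
 * packets cross, the accepted socket replaces the connected one, and a
 * coin flip decides whether to retry, so the two nodes eventually
 * settle on the same socket pair. */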
static int conn_connect(struct drbd_connection *connection)
{
	struct drbd_socket sock, msock;
	struct drbd_peer_device *peer_device;
	struct net_conf *nc;
	int vnr, timeout, h;
	bool discard_my_data, ok;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.connection = connection,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &connection->flags);
	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = connection->data.sbuf;
	sock.rbuf = connection->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = connection->meta.sbuf;
	msock.rbuf = connection->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	connection->agreed_pro_version = 80;

	if (prepare_listen_socket(connection, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(connection);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(connection, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
				msock.socket = s;
				send_first_packet(connection, &msock, P_INITIAL_META);
			} else {
				drbd_err(connection, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (connection_established(connection, &sock.socket, &msock.socket))
			break;

retry:
		s = drbd_wait_for_connect(connection, &ad);
		if (s) {
			int fp = receive_first_packet(connection, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					drbd_warn(connection, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &connection->flags);
				if (msock.socket) {
					drbd_warn(connection, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				drbd_warn(connection, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (connection->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&connection->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = connection_established(connection, &sock.socket, &msock.socket);
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	connection->data.socket = sock.socket;
	connection->meta.socket = msock.socket;
	connection->last_received = jiffies;

	h = drbd_do_features(connection);
	if (h <= 0)
		return h;

	if (connection->cram_hmac_tfm) {
		/* drbd_request_state(device, NS(conn, WFAuth)); */
		switch (drbd_do_auth(connection)) {
		case -1:
			drbd_err(connection, "Authentication of peer failed\n");
			return -1;
		case 0:
			drbd_err(connection, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	connection->data.socket->sk->sk_sndtimeo = timeout;
	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
		return -1;

	/* Prevent a race between resync-handshake and
	 * being promoted to Primary.
	 *
	 * Grab and release the state mutex, so we know that any current
	 * drbd_set_role() is finished, and any incoming drbd_set_role
	 * will see the STATE_SENT flag, and wait for it to be cleared.
	 */
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_lock(peer_device->device->state_mutex);

	/* avoid a race with conn_request_state( C_DISCONNECTING ) */
	spin_lock_irq(&connection->resource->req_lock);
	set_bit(STATE_SENT, &connection->flags);
	spin_unlock_irq(&connection->resource->req_lock);

	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_unlock(peer_device->device->state_mutex);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		kref_get(&device->kref);
		rcu_read_unlock();

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &device->flags);
		else
			clear_bit(DISCARD_MY_DATA, &device->flags);

		drbd_connected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &connection->flags);
		return 0;
	}

	drbd_thread_start(&connection->ack_receiver);
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use format string arguments */
	connection->ack_sender =
		alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
	if (!connection->ack_sender) {
		drbd_err(connection, "Failed to create workqueue ack_sender\n");
		return 0;
	}

	mutex_lock(&connection->resource->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	connection->net_conf->discard_my_data = 0;
	mutex_unlock(&connection->resource->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

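/* The on-the-wire header layout depends on the agreed protocol version:
 * struct p_header100 (magic DRBD_MAGIC_100, carries a 16 bit volume
 * number), struct p_header95 (magic DRBD_MAGIC_BIG, 32 bit length) or
 * struct p_header80 (magic DRBD_MAGIC, 16 bit length).  The magic
 * value doubles as a sanity check on the stream. */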
static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static void drbd_unplug_all_devices(struct drbd_connection *connection)
{
	if (current->plug == &connection->receiver_plug) {
		blk_finish_plug(&connection->receiver_plug);
		blk_start_plug(&connection->receiver_plug);
	} /* else: maybe just schedule() ?? */
}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	int err;

	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
	if (err)
		return err;

	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;

	return err;
}

static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	unsigned int size = drbd_header_size(connection);
	int err;

	err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
	if (err != size) {
		/* If we have nothing in the receive buffer now, to reduce
		 * application latency, try to drain the backend queues as
		 * quickly as possible, and let remote TCP know what we have
		 * received so far. */
		if (err == -EAGAIN) {
			drbd_tcp_quickack(connection->data.socket);
			drbd_unplug_all_devices(connection);
		}
		if (err > 0) {
			buffer += err;
			size -= err;
		}
		err = drbd_recv_all_warn(connection, buffer, size);
		if (err)
			return err;
	}

	err = decode_header(connection, connection->data.rbuf, pi);
	connection->last_received = jiffies;

	return err;
}
/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

static void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_status) {
		ctx->error = blk_status_to_errno(bio->bi_status);
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
	if (!bio || !octx) {
		drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now?  disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		kfree(octx);
		if (bio)
			bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;
	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

		atomic_set(&ctx.pending, 1);
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

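		/* ctx.pending was biased with an initial count of 1 above,
		 * so no endio could complete ctx.done while we were still
		 * submitting.  Drop that bias now; if flushes are still in
		 * flight, wait for the last one to complete us. */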
		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @connection: DRBD connection.
 * @epoch:      Epoch object.
 * @ev:         Epoch event.
 */
1368 static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
1369                                                struct drbd_epoch *epoch,
1370                                                enum epoch_event ev)
1371 {
1372         int epoch_size;
1373         struct drbd_epoch *next_epoch;
1374         enum finish_epoch rv = FE_STILL_LIVE;
1375
1376         spin_lock(&connection->epoch_lock);
1377         do {
1378                 next_epoch = NULL;
1379
1380                 epoch_size = atomic_read(&epoch->epoch_size);
1381
1382                 switch (ev & ~EV_CLEANUP) {
1383                 case EV_PUT:
1384                         atomic_dec(&epoch->active);
1385                         break;
1386                 case EV_GOT_BARRIER_NR:
1387                         set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1388                         break;
1389                 case EV_BECAME_LAST:
1390                         /* nothing to do*/
1391                         break;
1392                 }
1393
1394                 if (epoch_size != 0 &&
1395                     atomic_read(&epoch->active) == 0 &&
1396                     (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
1397                         if (!(ev & EV_CLEANUP)) {
1398                                 spin_unlock(&connection->epoch_lock);
1399                                 drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1400                                 spin_lock(&connection->epoch_lock);
1401                         }
1402 #if 0
1403                         /* FIXME: dec unacked on connection, once we have
1404                          * something to count pending connection packets in. */
1405                         if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
1406                                 dec_unacked(epoch->connection);
1407 #endif
1408
1409                         if (connection->current_epoch != epoch) {
1410                                 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1411                                 list_del(&epoch->list);
1412                                 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1413                                 connection->epochs--;
1414                                 kfree(epoch);
1415
1416                                 if (rv == FE_STILL_LIVE)
1417                                         rv = FE_DESTROYED;
1418                         } else {
1419                                 epoch->flags = 0;
1420                                 atomic_set(&epoch->epoch_size, 0);
1421                                 /* atomic_set(&epoch->active, 0); is already zero */
1422                                 if (rv == FE_STILL_LIVE)
1423                                         rv = FE_RECYCLED;
1424                         }
1425                 }
1426
1427                 if (!next_epoch)
1428                         break;
1429
1430                 epoch = next_epoch;
1431         } while (1);
1432
1433         spin_unlock(&connection->epoch_lock);
1434
1435         return rv;
1436 }
1437
1438 static enum write_ordering_e
1439 max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
1440 {
1441         struct disk_conf *dc;
1442
1443         dc = rcu_dereference(bdev->disk_conf);
1444
1445         if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
1446                 wo = WO_DRAIN_IO;
1447         if (wo == WO_DRAIN_IO && !dc->disk_drain)
1448                 wo = WO_NONE;
1449
1450         return wo;
1451 }
1452 /**
1453  * drbd_bump_write_ordering() - Fall back to another write ordering method
1454  * @resource:   DRBD resource.
1455  * @bdev:       Backing device to consider, may be NULL.
1456  * @wo:         Write ordering method to try.
1457  */
1458 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1459                               enum write_ordering_e wo)
1460 {
1461         struct drbd_device *device;
1462         enum write_ordering_e pwo;
1463         int vnr;
1464         static char *write_ordering_str[] = {
1465                 [WO_NONE] = "none",
1466                 [WO_DRAIN_IO] = "drain",
1467                 [WO_BDEV_FLUSH] = "flush",
1468         };
1469
1470         pwo = resource->write_ordering;
1471         if (wo != WO_BDEV_FLUSH)
1472                 wo = min(pwo, wo);
1473         rcu_read_lock();
1474         idr_for_each_entry(&resource->devices, device, vnr) {
1475                 if (get_ldev(device)) {
1476                         wo = max_allowed_wo(device->ldev, wo);
1477                         if (device->ldev == bdev)
1478                                 bdev = NULL;
1479                         put_ldev(device);
1480                 }
1481         }
1482
1483         if (bdev)
1484                 wo = max_allowed_wo(bdev, wo);
1485
1486         rcu_read_unlock();
1487
1488         resource->write_ordering = wo;
1489         if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
1490                 drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
1491 }
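
/*
 * Example (illustrative): with the resource at WO_BDEV_FLUSH, a backing
 * device whose disk_conf has disk_flushes disabled degrades the effective
 * method to WO_DRAIN_IO via max_allowed_wo(); with disk_drain disabled as
 * well, it degrades further to WO_NONE.  The idr walk above applies the
 * strictest such limit across all attached backing devices.
 */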
1492
1493 static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
1494 {
1495         struct block_device *bdev = device->ldev->backing_bdev;
1496
1497         if (blkdev_issue_zeroout(bdev, peer_req->i.sector, peer_req->i.size >> 9,
1498                         GFP_NOIO, 0))
1499                 peer_req->flags |= EE_WAS_ERROR;
1500
1501         drbd_endio_write_sec_final(peer_req);
1502 }
1503
1504 static void drbd_issue_peer_wsame(struct drbd_device *device,
1505                                   struct drbd_peer_request *peer_req)
1506 {
1507         struct block_device *bdev = device->ldev->backing_bdev;
1508         sector_t s = peer_req->i.sector;
1509         sector_t nr = peer_req->i.size >> 9;
1510         if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
1511                 peer_req->flags |= EE_WAS_ERROR;
1512         drbd_endio_write_sec_final(peer_req);
1513 }
1514
1515
1516 /**
1517  * drbd_submit_peer_request() - Submit a peer request to the local backing device
1518  * @device:     DRBD device.
1519  * @peer_req:   peer request
1520  * @op:         REQ_OP_* request operation, see bio->bi_opf
1521  * @op_flags:   additional request flags, see bio->bi_opf
1522  * @fault_type: DRBD_FAULT_* fault injection point, see drbd_insert_fault()
1523  *
1524  * May spread the pages to multiple bios, depending on bio_add_page restrictions.
1525  *
1526  * Returns 0 if all bios have been submitted, -ENOMEM if we could not
1527  * allocate enough bios, -ENOSPC (any better suggestion?) if we have not
1528  * been able to bio_add_page a single page to an empty bio (which should
1529  * never happen and likely indicates that the lower level IO stack is in
1530  * some way broken). This has been observed on certain Xen deployments.
1531  */
1532 /* TODO allocate from our own bio_set. */
1533 int drbd_submit_peer_request(struct drbd_device *device,
1534                              struct drbd_peer_request *peer_req,
1535                              const unsigned op, const unsigned op_flags,
1536                              const int fault_type)
1537 {
1538         struct bio *bios = NULL;
1539         struct bio *bio;
1540         struct page *page = peer_req->pages;
1541         sector_t sector = peer_req->i.sector;
1542         unsigned data_size = peer_req->i.size;
1543         unsigned n_bios = 0;
1544         unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1545         int err = -ENOMEM;
1546
1547         /* TRIM/DISCARD: for now, always use the helper function
1548          * blkdev_issue_zeroout().
1549          * It's synchronous, but it does the right thing wrt. bio splitting.
1550          * Correctness first, performance later.  Next step is to code an
1551          * asynchronous variant of the same.
1552          */
1553         if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) {
1554                 /* wait for all pending IO completions, before we start
1555                  * zeroing things out. */
1556                 conn_wait_active_ee_empty(peer_req->peer_device->connection);
1557                 /* add it to the active list now,
1558                  * so we can find it to present it in debugfs */
1559                 peer_req->submit_jif = jiffies;
1560                 peer_req->flags |= EE_SUBMITTED;
1561
1562                 /* If this was a resync request from receive_rs_deallocated(),
1563                  * it is already on the sync_ee list */
1564                 if (list_empty(&peer_req->w.list)) {
1565                         spin_lock_irq(&device->resource->req_lock);
1566                         list_add_tail(&peer_req->w.list, &device->active_ee);
1567                         spin_unlock_irq(&device->resource->req_lock);
1568                 }
1569
1570                 if (peer_req->flags & EE_IS_TRIM)
1571                         drbd_issue_peer_discard(device, peer_req);
1572                 else /* EE_WRITE_SAME */
1573                         drbd_issue_peer_wsame(device, peer_req);
1574                 return 0;
1575         }
1576
1577         /* In most cases, we will only need one bio.  But in case the lower
1578          * level restrictions happen to be different at this offset on this
1579          * side than those of the sending peer, we may need to submit the
1580          * request in more than one bio.
1581          *
1582          * Plain bio_alloc is good enough here, this is no DRBD internally
1583          * generated bio, but a bio allocated on behalf of the peer.
1584          */
1585 next_bio:
1586         bio = bio_alloc(GFP_NOIO, nr_pages);
1587         if (!bio) {
1588                 drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
1589                 goto fail;
1590         }
1591         /* > peer_req->i.sector, unless this is the first bio */
1592         bio->bi_iter.bi_sector = sector;
1593         bio_set_dev(bio, device->ldev->backing_bdev);
1594         bio_set_op_attrs(bio, op, op_flags);
1595         bio->bi_private = peer_req;
1596         bio->bi_end_io = drbd_peer_request_endio;
1597
1598         bio->bi_next = bios;
1599         bios = bio;
1600         ++n_bios;
1601
1602         page_chain_for_each(page) {
1603                 unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
1604                 if (!bio_add_page(bio, page, len, 0))
1605                         goto next_bio;
1606                 data_size -= len;
1607                 sector += len >> 9;
1608                 --nr_pages;
1609         }
1610         D_ASSERT(device, data_size == 0);
1611         D_ASSERT(device, page == NULL);
1612
1613         atomic_set(&peer_req->pending_bios, n_bios);
1614         /* for debugfs: update timestamp, mark as submitted */
1615         peer_req->submit_jif = jiffies;
1616         peer_req->flags |= EE_SUBMITTED;
1617         do {
1618                 bio = bios;
1619                 bios = bios->bi_next;
1620                 bio->bi_next = NULL;
1621
1622                 drbd_generic_make_request(device, fault_type, bio);
1623         } while (bios);
1624         return 0;
1625
1626 fail:
1627         while (bios) {
1628                 bio = bios;
1629                 bios = bios->bi_next;
1630                 bio_put(bio);
1631         }
1632         return err;
1633 }
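
/*
 * Splitting sketch for drbd_submit_peer_request() (illustrative numbers):
 * a 32 KiB peer request arrives as a chain of 8 pages.  If bio_add_page()
 * refuses the 5th page due to lower level limits, the first bio carries
 * pages 1-4 (32 sectors starting at i.sector), and a second bio is
 * allocated via "goto next_bio" starting 32 sectors further in;
 * pending_bios ends up as 2.
 */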
1634
1635 static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1636                                              struct drbd_peer_request *peer_req)
1637 {
1638         struct drbd_interval *i = &peer_req->i;
1639
1640         drbd_remove_interval(&device->write_requests, i);
1641         drbd_clear_interval(i);
1642
1643         /* Wake up any processes waiting for this peer request to complete.  */
1644         if (i->waiting)
1645                 wake_up(&device->misc_wait);
1646 }
1647
1648 static void conn_wait_active_ee_empty(struct drbd_connection *connection)
1649 {
1650         struct drbd_peer_device *peer_device;
1651         int vnr;
1652
1653         rcu_read_lock();
1654         idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1655                 struct drbd_device *device = peer_device->device;
1656
1657                 kref_get(&device->kref);
1658                 rcu_read_unlock();
1659                 drbd_wait_ee_list_empty(device, &device->active_ee);
1660                 kref_put(&device->kref, drbd_destroy_device);
1661                 rcu_read_lock();
1662         }
1663         rcu_read_unlock();
1664 }
1665
1666 static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
1667 {
1668         int rv;
1669         struct p_barrier *p = pi->data;
1670         struct drbd_epoch *epoch;
1671
1672         /* FIXME these are unacked on connection,
1673          * not a specific (peer)device.
1674          */
1675         connection->current_epoch->barrier_nr = p->barrier;
1676         connection->current_epoch->connection = connection;
1677         rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
1678
1679         /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1680          * the activity log, which means it would not be resynced in case the
1681          * R_PRIMARY crashes now.
1682          * Therefore we must send the barrier_ack after the barrier request was
1683          * completed. */
1684         switch (connection->resource->write_ordering) {
1685         case WO_NONE:
1686                 if (rv == FE_RECYCLED)
1687                         return 0;
1688
1689                 /* receiver context, in the writeout path of the other node.
1690                  * avoid potential distributed deadlock */
1691                 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1692                 if (epoch)
1693                         break;
1694                 else
1695                         drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
1696                         /* Fall through */
1697
1698         case WO_BDEV_FLUSH:
1699         case WO_DRAIN_IO:
1700                 conn_wait_active_ee_empty(connection);
1701                 drbd_flush(connection);
1702
1703                 if (atomic_read(&connection->current_epoch->epoch_size)) {
1704                         epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1705                         if (epoch)
1706                                 break;
1707                 }
1708
1709                 return 0;
1710         default:
1711                 drbd_err(connection, "Strangeness in resource->write_ordering %d\n",
1712                          connection->resource->write_ordering);
1713                 return -EIO;
1714         }
1715
1716         epoch->flags = 0;
1717         atomic_set(&epoch->epoch_size, 0);
1718         atomic_set(&epoch->active, 0);
1719
1720         spin_lock(&connection->epoch_lock);
1721         if (atomic_read(&connection->current_epoch->epoch_size)) {
1722                 list_add(&epoch->list, &connection->current_epoch->list);
1723                 connection->current_epoch = epoch;
1724                 connection->epochs++;
1725         } else {
1726                 /* The current_epoch got recycled while we allocated this one... */
1727                 kfree(epoch);
1728         }
1729         spin_unlock(&connection->epoch_lock);
1730
1731         return 0;
1732 }
1733
1734 /* quick wrapper in case payload size != request_size (write same) */
1735 static void drbd_csum_ee_size(struct crypto_shash *h,
1736                               struct drbd_peer_request *r, void *d,
1737                               unsigned int payload_size)
1738 {
1739         unsigned int tmp = r->i.size;
1740         r->i.size = payload_size;
1741         drbd_csum_ee(h, r, d);
1742         r->i.size = tmp;
1743 }
1744
1745 /* used from receive_RSDataReply (recv_resync_read)
1746  * and from receive_Data.
1747  * data_size: actual payload ("data in")
1748  *      for normal writes that is bi_size.
1749  *      for discards, that is zero.
1750  *      for write same, it is logical_block_size.
1751  * both trim and write same have the bi_size ("data len to be affected")
1752  * as extra argument in the packet header.
1753  */
1754 static struct drbd_peer_request *
1755 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
1756               struct packet_info *pi) __must_hold(local)
1757 {
1758         struct drbd_device *device = peer_device->device;
1759         const sector_t capacity = drbd_get_capacity(device->this_bdev);
1760         struct drbd_peer_request *peer_req;
1761         struct page *page;
1762         int digest_size, err;
1763         unsigned int data_size = pi->size, ds;
1764         void *dig_in = peer_device->connection->int_dig_in;
1765         void *dig_vv = peer_device->connection->int_dig_vv;
1766         unsigned long *data;
1767         struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
1768         struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
1769
1770         digest_size = 0;
1771         if (!trim && peer_device->connection->peer_integrity_tfm) {
1772                 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1773                 /*
1774                  * FIXME: Receive the incoming digest into the receive buffer
1775                  *        here, together with its struct p_data?
1776                  */
1777                 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1778                 if (err)
1779                         return NULL;
1780                 data_size -= digest_size;
1781         }
1782
1783         /* assume request_size == data_size, but special case trim and wsame. */
1784         ds = data_size;
1785         if (trim) {
1786                 if (!expect(data_size == 0))
1787                         return NULL;
1788                 ds = be32_to_cpu(trim->size);
1789         } else if (wsame) {
1790                 if (data_size != queue_logical_block_size(device->rq_queue)) {
1791                         drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
1792                                 data_size, queue_logical_block_size(device->rq_queue));
1793                         return NULL;
1794                 }
1795                 if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
1796                         drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
1797                                 data_size, bdev_logical_block_size(device->ldev->backing_bdev));
1798                         return NULL;
1799                 }
1800                 ds = be32_to_cpu(wsame->size);
1801         }
1802
1803         if (!expect(IS_ALIGNED(ds, 512)))
1804                 return NULL;
1805         if (trim || wsame) {
1806                 if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1807                         return NULL;
1808         } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
1809                 return NULL;
1810
1811         /* even though we trust our peer,
1812          * we sometimes have to double check. */
1813         if (sector + (ds>>9) > capacity) {
1814                 drbd_err(device, "request from peer beyond end of local disk: "
1815                         "capacity: %llus < sector: %llus + size: %u\n",
1816                         (unsigned long long)capacity,
1817                         (unsigned long long)sector, ds);
1818                 return NULL;
1819         }
1820
1821         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1822          * "criss-cross" setup, that might cause write-out on some other DRBD,
1823          * which in turn might block on the other node at this very place.  */
1824         peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1825         if (!peer_req)
1826                 return NULL;
1827
1828         peer_req->flags |= EE_WRITE;
1829         if (trim) {
1830                 peer_req->flags |= EE_IS_TRIM;
1831                 return peer_req;
1832         }
1833         if (wsame)
1834                 peer_req->flags |= EE_WRITE_SAME;
1835
1836         /* receive payload size bytes into page chain */
1837         ds = data_size;
1838         page = peer_req->pages;
1839         page_chain_for_each(page) {
1840                 unsigned len = min_t(int, ds, PAGE_SIZE);
1841                 data = kmap(page);
1842                 err = drbd_recv_all_warn(peer_device->connection, data, len);
1843                 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1844                         drbd_err(device, "Fault injection: Corrupting data on receive\n");
1845                         data[0] = data[0] ^ (unsigned long)-1;
1846                 }
1847                 kunmap(page);
1848                 if (err) {
1849                         drbd_free_peer_req(device, peer_req);
1850                         return NULL;
1851                 }
1852                 ds -= len;
1853         }
1854
1855         if (digest_size) {
1856                 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1857                 if (memcmp(dig_in, dig_vv, digest_size)) {
1858                         drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1859                                 (unsigned long long)sector, data_size);
1860                         drbd_free_peer_req(device, peer_req);
1861                         return NULL;
1862                 }
1863         }
1864         device->recv_cnt += data_size >> 9;
1865         return peer_req;
1866 }
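
/*
 * Size handling examples for read_in_block() (illustrative):
 *   normal write: data_size == ds == pi->size (minus any integrity digest),
 *                 and ds bytes of payload are received into the page chain;
 *   P_TRIM:       data_size == 0, ds == be32_to_cpu(trim->size), no payload;
 *   P_WSAME:      data_size == one logical block, ds == be32_to_cpu(wsame->size).
 */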
1867
1868 /* drbd_drain_block() just takes a data block
1869  * out of the socket input buffer, and discards it.
1870  */
1871 static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
1872 {
1873         struct page *page;
1874         int err = 0;
1875         void *data;
1876
1877         if (!data_size)
1878                 return 0;
1879
1880         page = drbd_alloc_pages(peer_device, 1, 1);
1881
1882         data = kmap(page);
1883         while (data_size) {
1884                 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1885
1886                 err = drbd_recv_all_warn(peer_device->connection, data, len);
1887                 if (err)
1888                         break;
1889                 data_size -= len;
1890         }
1891         kunmap(page);
1892         drbd_free_pages(peer_device->device, page, 0);
1893         return err;
1894 }
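
/*
 * Draining keeps the byte stream aligned with the packet framing: even
 * when we cannot apply the data (e.g. no local disk), the payload
 * announced in the packet header must still be consumed from the socket
 * before the next packet can be parsed.
 */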
1895
1896 static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
1897                            sector_t sector, int data_size)
1898 {
1899         struct bio_vec bvec;
1900         struct bvec_iter iter;
1901         struct bio *bio;
1902         int digest_size, err, expect;
1903         void *dig_in = peer_device->connection->int_dig_in;
1904         void *dig_vv = peer_device->connection->int_dig_vv;
1905
1906         digest_size = 0;
1907         if (peer_device->connection->peer_integrity_tfm) {
1908                 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1909                 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1910                 if (err)
1911                         return err;
1912                 data_size -= digest_size;
1913         }
1914
1915         /* optimistically update recv_cnt.  if receiving fails below,
1916          * we disconnect anyways, and counters will be reset. */
1917         peer_device->device->recv_cnt += data_size>>9;
1918
1919         bio = req->master_bio;
1920         D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
1921
1922         bio_for_each_segment(bvec, bio, iter) {
1923                 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1924                 expect = min_t(int, data_size, bvec.bv_len);
1925                 err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
1926                 kunmap(bvec.bv_page);
1927                 if (err)
1928                         return err;
1929                 data_size -= expect;
1930         }
1931
1932         if (digest_size) {
1933                 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
1934                 if (memcmp(dig_in, dig_vv, digest_size)) {
1935                         drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
1936                         return -EINVAL;
1937                 }
1938         }
1939
1940         D_ASSERT(peer_device->device, data_size == 0);
1941         return 0;
1942 }
1943
1944 /*
1945  * e_end_resync_block() is called in ack_sender context via
1946  * drbd_finish_peer_reqs().
1947  */
1948 static int e_end_resync_block(struct drbd_work *w, int unused)
1949 {
1950         struct drbd_peer_request *peer_req =
1951                 container_of(w, struct drbd_peer_request, w);
1952         struct drbd_peer_device *peer_device = peer_req->peer_device;
1953         struct drbd_device *device = peer_device->device;
1954         sector_t sector = peer_req->i.sector;
1955         int err;
1956
1957         D_ASSERT(device, drbd_interval_empty(&peer_req->i));
1958
1959         if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1960                 drbd_set_in_sync(device, sector, peer_req->i.size);
1961                 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
1962         } else {
1963                 /* Record failure to sync */
1964                 drbd_rs_failed_io(device, sector, peer_req->i.size);
1965
1966                 err  = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
1967         }
1968         dec_unacked(device);
1969
1970         return err;
1971 }
1972
1973 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
1974                             struct packet_info *pi) __releases(local)
1975 {
1976         struct drbd_device *device = peer_device->device;
1977         struct drbd_peer_request *peer_req;
1978
1979         peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
1980         if (!peer_req)
1981                 goto fail;
1982
1983         dec_rs_pending(device);
1984
1985         inc_unacked(device);
1986         /* corresponding dec_unacked() in e_end_resync_block(),
1987          * or in _drbd_clear_done_ee() */
1988
1989         peer_req->w.cb = e_end_resync_block;
1990         peer_req->submit_jif = jiffies;
1991
1992         spin_lock_irq(&device->resource->req_lock);
1993         list_add_tail(&peer_req->w.list, &device->sync_ee);
1994         spin_unlock_irq(&device->resource->req_lock);
1995
1996         atomic_add(pi->size >> 9, &device->rs_sect_ev);
1997         if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
1998                                      DRBD_FAULT_RS_WR) == 0)
1999                 return 0;
2000
2001         /* don't care for the reason here */
2002         drbd_err(device, "submit failed, triggering re-connect\n");
2003         spin_lock_irq(&device->resource->req_lock);
2004         list_del(&peer_req->w.list);
2005         spin_unlock_irq(&device->resource->req_lock);
2006
2007         drbd_free_peer_req(device, peer_req);
2008 fail:
2009         put_ldev(device);
2010         return -EIO;
2011 }
2012
2013 static struct drbd_request *
2014 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
2015              sector_t sector, bool missing_ok, const char *func)
2016 {
2017         struct drbd_request *req;
2018
2019         /* Request object according to our peer */
2020         req = (struct drbd_request *)(unsigned long)id;
2021         if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
2022                 return req;
2023         if (!missing_ok) {
2024                 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
2025                         (unsigned long)id, (unsigned long long)sector);
2026         }
2027         return NULL;
2028 }
2029
2030 static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
2031 {
2032         struct drbd_peer_device *peer_device;
2033         struct drbd_device *device;
2034         struct drbd_request *req;
2035         sector_t sector;
2036         int err;
2037         struct p_data *p = pi->data;
2038
2039         peer_device = conn_peer_device(connection, pi->vnr);
2040         if (!peer_device)
2041                 return -EIO;
2042         device = peer_device->device;
2043
2044         sector = be64_to_cpu(p->sector);
2045
2046         spin_lock_irq(&device->resource->req_lock);
2047         req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
2048         spin_unlock_irq(&device->resource->req_lock);
2049         if (unlikely(!req))
2050                 return -EIO;
2051
2052         /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
2053          * special casing it there for the various failure cases.
2054          * still no race with drbd_fail_pending_reads */
2055         err = recv_dless_read(peer_device, req, sector, pi->size);
2056         if (!err)
2057                 req_mod(req, DATA_RECEIVED);
2058         /* else: nothing. handled from drbd_disconnect...
2059          * I don't think we may complete this just yet
2060          * in case we are "on-disconnect: freeze" */
2061
2062         return err;
2063 }
2064
2065 static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
2066 {
2067         struct drbd_peer_device *peer_device;
2068         struct drbd_device *device;
2069         sector_t sector;
2070         int err;
2071         struct p_data *p = pi->data;
2072
2073         peer_device = conn_peer_device(connection, pi->vnr);
2074         if (!peer_device)
2075                 return -EIO;
2076         device = peer_device->device;
2077
2078         sector = be64_to_cpu(p->sector);
2079         D_ASSERT(device, p->block_id == ID_SYNCER);
2080
2081         if (get_ldev(device)) {
2082                 /* data is submitted to disk within recv_resync_read.
2083                  * corresponding put_ldev done below on error,
2084                  * or in drbd_peer_request_endio. */
2085                 err = recv_resync_read(peer_device, sector, pi);
2086         } else {
2087                 if (__ratelimit(&drbd_ratelimit_state))
2088                         drbd_err(device, "Can not write resync data to local disk.\n");
2089
2090                 err = drbd_drain_block(peer_device, pi->size);
2091
2092                 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2093         }
2094
2095         atomic_add(pi->size >> 9, &device->rs_sect_in);
2096
2097         return err;
2098 }
2099
2100 static void restart_conflicting_writes(struct drbd_device *device,
2101                                        sector_t sector, int size)
2102 {
2103         struct drbd_interval *i;
2104         struct drbd_request *req;
2105
2106         drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2107                 if (!i->local)
2108                         continue;
2109                 req = container_of(i, struct drbd_request, i);
2110                 if (req->rq_state & RQ_LOCAL_PENDING ||
2111                     !(req->rq_state & RQ_POSTPONED))
2112                         continue;
2113                 /* as it is RQ_POSTPONED, this will cause it to
2114                  * be queued on the retry workqueue. */
2115                 __req_mod(req, CONFLICT_RESOLVED, NULL);
2116         }
2117 }
2118
2119 /*
2120  * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
2121  */
2122 static int e_end_block(struct drbd_work *w, int cancel)
2123 {
2124         struct drbd_peer_request *peer_req =
2125                 container_of(w, struct drbd_peer_request, w);
2126         struct drbd_peer_device *peer_device = peer_req->peer_device;
2127         struct drbd_device *device = peer_device->device;
2128         sector_t sector = peer_req->i.sector;
2129         int err = 0, pcmd;
2130
2131         if (peer_req->flags & EE_SEND_WRITE_ACK) {
2132                 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2133                         pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2134                                 device->state.conn <= C_PAUSED_SYNC_T &&
2135                                 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
2136                                 P_RS_WRITE_ACK : P_WRITE_ACK;
2137                         err = drbd_send_ack(peer_device, pcmd, peer_req);
2138                         if (pcmd == P_RS_WRITE_ACK)
2139                                 drbd_set_in_sync(device, sector, peer_req->i.size);
2140                 } else {
2141                         err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2142                         /* we expect it to be marked out of sync anyways...
2143                          * maybe assert this?  */
2144                 }
2145                 dec_unacked(device);
2146         }
2147
2148         /* we delete from the conflict detection hash _after_ we sent out the
2149          * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
2150         if (peer_req->flags & EE_IN_INTERVAL_TREE) {
2151                 spin_lock_irq(&device->resource->req_lock);
2152                 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2153                 drbd_remove_epoch_entry_interval(device, peer_req);
2154                 if (peer_req->flags & EE_RESTART_REQUESTS)
2155                         restart_conflicting_writes(device, sector, peer_req->i.size);
2156                 spin_unlock_irq(&device->resource->req_lock);
2157         } else
2158                 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2159
2160         drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
2161
2162         return err;
2163 }
2164
2165 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
2166 {
2167         struct drbd_peer_request *peer_req =
2168                 container_of(w, struct drbd_peer_request, w);
2169         struct drbd_peer_device *peer_device = peer_req->peer_device;
2170         int err;
2171
2172         err = drbd_send_ack(peer_device, ack, peer_req);
2173         dec_unacked(peer_device->device);
2174
2175         return err;
2176 }
2177
2178 static int e_send_superseded(struct drbd_work *w, int unused)
2179 {
2180         return e_send_ack(w, P_SUPERSEDED);
2181 }
2182
2183 static int e_send_retry_write(struct drbd_work *w, int unused)
2184 {
2185         struct drbd_peer_request *peer_req =
2186                 container_of(w, struct drbd_peer_request, w);
2187         struct drbd_connection *connection = peer_req->peer_device->connection;
2188
2189         return e_send_ack(w, connection->agreed_pro_version >= 100 ?
2190                              P_RETRY_WRITE : P_SUPERSEDED);
2191 }
2192
2193 static bool seq_greater(u32 a, u32 b)
2194 {
2195         /*
2196          * We assume 32-bit wrap-around here.
2197          * For 24-bit wrap-around, we would have to shift:
2198          *  a <<= 8; b <<= 8;
2199          */
2200         return (s32)a - (s32)b > 0;
2201 }
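
/*
 * Wrap-around example: seq_greater(2, 0xfffffffe) is true, because
 * (s32)2 - (s32)0xfffffffe == 2 - (-2) == 4 > 0; a sequence number that
 * recently wrapped still compares as "newer" than one from just before
 * the wrap.
 */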
2202
2203 static u32 seq_max(u32 a, u32 b)
2204 {
2205         return seq_greater(a, b) ? a : b;
2206 }
2207
2208 static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
2209 {
2210         struct drbd_device *device = peer_device->device;
2211         unsigned int newest_peer_seq;
2212
2213         if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
2214                 spin_lock(&device->peer_seq_lock);
2215                 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2216                 device->peer_seq = newest_peer_seq;
2217                 spin_unlock(&device->peer_seq_lock);
2218                 /* wake up only if we actually changed device->peer_seq */
2219                 if (peer_seq == newest_peer_seq)
2220                         wake_up(&device->seq_wait);
2221         }
2222 }
2223
2224 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
2225 {
2226         return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
2227 }
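
/*
 * Note that s1/s2 are sectors while l1/l2 are byte counts: a 4 KiB
 * request at sector 0 covers sectors 0..7, so overlaps(0, 4096, 8, 4096)
 * is false (adjacent, not overlapping) while overlaps(0, 4096, 7, 4096)
 * is true.
 */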
2228
2229 /* maybe change sync_ee into interval trees as well? */
2230 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2231 {
2232         struct drbd_peer_request *rs_req;
2233         bool rv = false;
2234
2235         spin_lock_irq(&device->resource->req_lock);
2236         list_for_each_entry(rs_req, &device->sync_ee, w.list) {
2237                 if (overlaps(peer_req->i.sector, peer_req->i.size,
2238                              rs_req->i.sector, rs_req->i.size)) {
2239                         rv = true;
2240                         break;
2241                 }
2242         }
2243         spin_unlock_irq(&device->resource->req_lock);
2244
2245         return rv;
2246 }
2247
2248 /* Called from receive_Data.
2249  * Synchronize packets on sock with packets on msock.
2250  *
2251  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
2252  * packet traveling on msock, they are still processed in the order they have
2253  * been sent.
2254  *
2255  * Note: we don't care for Ack packets overtaking P_DATA packets.
2256  *
2257  * In case packet_seq is larger than device->peer_seq number, there are
2258  * outstanding packets on the msock. We wait for them to arrive.
2259  * In case we are the logically next packet, we update device->peer_seq
2260  * ourselves. Correctly handles 32bit wrap around.
2261  *
2262  * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
2263  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
2264  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
2265  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
2266  *
2267  * returns 0 if we may process the packet,
2268  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
2269 static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
2270 {
2271         struct drbd_device *device = peer_device->device;
2272         DEFINE_WAIT(wait);
2273         long timeout;
2274         int ret = 0, tp;
2275
2276         if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
2277                 return 0;
2278
2279         spin_lock(&device->peer_seq_lock);
2280         for (;;) {
2281                 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2282                         device->peer_seq = seq_max(device->peer_seq, peer_seq);
2283                         break;
2284                 }
2285
2286                 if (signal_pending(current)) {
2287                         ret = -ERESTARTSYS;
2288                         break;
2289                 }
2290
2291                 rcu_read_lock();
2292                 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
2293                 rcu_read_unlock();
2294
2295                 if (!tp)
2296                         break;
2297
2298                 /* Only need to wait if two_primaries is enabled */
2299                 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2300                 spin_unlock(&device->peer_seq_lock);
2301                 rcu_read_lock();
2302                 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
2303                 rcu_read_unlock();
2304                 timeout = schedule_timeout(timeout);
2305                 spin_lock(&device->peer_seq_lock);
2306                 if (!timeout) {
2307                         ret = -ETIMEDOUT;
2308                         drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
2309                         break;
2310                 }
2311         }
2312         spin_unlock(&device->peer_seq_lock);
2313         finish_wait(&device->seq_wait, &wait);
2314         return ret;
2315 }
2316
2317 /* see also bio_flags_to_wire()
2318  * DRBD_REQ_*, because we need to semantically map the flags to data packet
2319  * flags and back. We may replicate to other kernel versions. */
2320 static unsigned long wire_flags_to_bio_flags(u32 dpf)
2321 {
2322         return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2323                 (dpf & DP_FUA ? REQ_FUA : 0) |
2324                 (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
2325 }
2326
2327 static unsigned long wire_flags_to_bio_op(u32 dpf)
2328 {
2329         if (dpf & DP_DISCARD)
2330                 return REQ_OP_WRITE_ZEROES;
2331         else
2332                 return REQ_OP_WRITE;
2333 }
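
/*
 * Example round trip (assuming the peer set DP_RW_SYNC|DP_FUA on an
 * ordinary write): wire_flags_to_bio_op() yields REQ_OP_WRITE and
 * wire_flags_to_bio_flags() yields REQ_SYNC|REQ_FUA, matching what the
 * submitting side encoded in bio_flags_to_wire().
 */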
2334
2335 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2336                                     unsigned int size)
2337 {
2338         struct drbd_interval *i;
2339
2340     repeat:
2341         drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2342                 struct drbd_request *req;
2343                 struct bio_and_error m;
2344
2345                 if (!i->local)
2346                         continue;
2347                 req = container_of(i, struct drbd_request, i);
2348                 if (!(req->rq_state & RQ_POSTPONED))
2349                         continue;
2350                 req->rq_state &= ~RQ_POSTPONED;
2351                 __req_mod(req, NEG_ACKED, &m);
2352                 spin_unlock_irq(&device->resource->req_lock);
2353                 if (m.bio)
2354                         complete_master_bio(device, &m);
2355                 spin_lock_irq(&device->resource->req_lock);
2356                 goto repeat;
2357         }
2358 }
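
/*
 * The unlock/relock around complete_master_bio() above is deliberate:
 * the master bio is completed without the req_lock held.  Because the
 * interval tree may change while the lock is dropped, the scan restarts
 * from the top ("goto repeat") instead of continuing a stale iteration.
 */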
2359
2360 static int handle_write_conflicts(struct drbd_device *device,
2361                                   struct drbd_peer_request *peer_req)
2362 {
2363         struct drbd_connection *connection = peer_req->peer_device->connection;
2364         bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2365         sector_t sector = peer_req->i.sector;
2366         const unsigned int size = peer_req->i.size;
2367         struct drbd_interval *i;
2368         bool equal;
2369         int err;
2370
2371         /*
2372          * Inserting the peer request into the write_requests tree will prevent
2373          * new conflicting local requests from being added.
2374          */
2375         drbd_insert_interval(&device->write_requests, &peer_req->i);
2376
2377     repeat:
2378         drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2379                 if (i == &peer_req->i)
2380                         continue;
2381                 if (i->completed)
2382                         continue;
2383
2384                 if (!i->local) {
2385                         /*
2386                          * Our peer has sent a conflicting remote request; this
2387                          * should not happen in a two-node setup.  Wait for the
2388                          * earlier peer request to complete.
2389                          */
2390                         err = drbd_wait_misc(device, i);
2391                         if (err)
2392                                 goto out;
2393                         goto repeat;
2394                 }
2395
2396                 equal = i->sector == sector && i->size == size;
2397                 if (resolve_conflicts) {
2398                         /*
2399                          * If the peer request is fully contained within the
2400                          * overlapping request, it can be considered overwritten
2401                          * and thus superseded; otherwise, it will be retried
2402                          * once all overlapping requests have completed.
2403                          */
2404                         bool superseded = i->sector <= sector && i->sector +
2405                                        (i->size >> 9) >= sector + (size >> 9);
2406
2407                         if (!equal)
2408                                 drbd_alert(device, "Concurrent writes detected: "
2409                                                "local=%llus +%u, remote=%llus +%u, "
2410                                                "assuming %s came first\n",
2411                                           (unsigned long long)i->sector, i->size,
2412                                           (unsigned long long)sector, size,
2413                                           superseded ? "local" : "remote");
2414
2415                         peer_req->w.cb = superseded ? e_send_superseded :
2416                                                    e_send_retry_write;
2417                         list_add_tail(&peer_req->w.list, &device->done_ee);
2418                         queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
2419
2420                         err = -ENOENT;
2421                         goto out;
2422                 } else {
2423                         struct drbd_request *req =
2424                                 container_of(i, struct drbd_request, i);
2425
2426                         if (!equal)
2427                                 drbd_alert(device, "Concurrent writes detected: "
2428                                                "local=%llus +%u, remote=%llus +%u\n",
2429                                           (unsigned long long)i->sector, i->size,
2430                                           (unsigned long long)sector, size);
2431
2432                         if (req->rq_state & RQ_LOCAL_PENDING ||
2433                             !(req->rq_state & RQ_POSTPONED)) {
2434                                 /*
2435                                  * Wait for the node with the discard flag to
2436                                  * decide if this request has been superseded
2437                                  * or needs to be retried.
2438                                  * Requests that have been superseded will
2439                                  * disappear from the write_requests tree.
2440                                  *
2441                                  * In addition, wait for the conflicting
2442                                  * request to finish locally before submitting
2443                                  * the conflicting peer request.
2444                                  */
2445                                 err = drbd_wait_misc(device, &req->i);
2446                                 if (err) {
2447                                         _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
2448                                         fail_postponed_requests(device, sector, size);
2449                                         goto out;
2450                                 }
2451                                 goto repeat;
2452                         }
2453                         /*
2454                          * Remember to restart the conflicting requests after
2455                          * the new peer request has completed.
2456                          */
2457                         peer_req->flags |= EE_RESTART_REQUESTS;
2458                 }
2459         }
2460         err = 0;
2461
2462     out:
2463         if (err)
2464                 drbd_remove_epoch_entry_interval(device, peer_req);
2465         return err;
2466 }
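
/*
 * Containment example for the "superseded" test (illustrative sectors):
 * a local request covering sectors 100..107 fully contains a conflicting
 * peer write of sectors 102..105, so with RESOLVE_CONFLICTS set the peer
 * request is answered with P_SUPERSEDED; a peer write of sectors 104..111
 * only partially overlaps and is answered with a retry instead
 * (P_RETRY_WRITE on agreed_pro_version >= 100).
 */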
2467
2468 /* mirrored write */
2469 static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
2470 {
2471         struct drbd_peer_device *peer_device;
2472         struct drbd_device *device;
2473         struct net_conf *nc;
2474         sector_t sector;
2475         struct drbd_peer_request *peer_req;
2476         struct p_data *p = pi->data;
2477         u32 peer_seq = be32_to_cpu(p->seq_num);
2478         int op, op_flags;
2479         u32 dp_flags;
2480         int err, tp;
2481
2482         peer_device = conn_peer_device(connection, pi->vnr);
2483         if (!peer_device)
2484                 return -EIO;
2485         device = peer_device->device;
2486
2487         if (!get_ldev(device)) {
2488                 int err2;
2489
2490                 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2491                 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2492                 atomic_inc(&connection->current_epoch->epoch_size);
2493                 err2 = drbd_drain_block(peer_device, pi->size);
2494                 if (!err)
2495                         err = err2;
2496                 return err;
2497         }
2498
2499         /*
2500          * Corresponding put_ldev done either below (on various errors), or in
2501          * drbd_peer_request_endio, if we successfully submit the data at the
2502          * end of this function.
2503          */
2504
2505         sector = be64_to_cpu(p->sector);
2506         peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2507         if (!peer_req) {
2508                 put_ldev(device);
2509                 return -EIO;
2510         }
2511
2512         peer_req->w.cb = e_end_block;
2513         peer_req->submit_jif = jiffies;
2514         peer_req->flags |= EE_APPLICATION;
2515
2516         dp_flags = be32_to_cpu(p->dp_flags);
2517         op = wire_flags_to_bio_op(dp_flags);
2518         op_flags = wire_flags_to_bio_flags(dp_flags);
2519         if (pi->cmd == P_TRIM) {
2520                 D_ASSERT(peer_device, peer_req->i.size > 0);
2521                 D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
2522                 D_ASSERT(peer_device, peer_req->pages == NULL);
2523         } else if (peer_req->pages == NULL) {
2524                 D_ASSERT(device, peer_req->i.size == 0);
2525                 D_ASSERT(device, dp_flags & DP_FLUSH);
2526         }
2527
2528         if (dp_flags & DP_MAY_SET_IN_SYNC)
2529                 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2530
2531         spin_lock(&connection->epoch_lock);
2532         peer_req->epoch = connection->current_epoch;
2533         atomic_inc(&peer_req->epoch->epoch_size);
2534         atomic_inc(&peer_req->epoch->active);
2535         spin_unlock(&connection->epoch_lock);
2536
2537         rcu_read_lock();
2538         nc = rcu_dereference(peer_device->connection->net_conf);
2539         tp = nc->two_primaries;
2540         if (peer_device->connection->agreed_pro_version < 100) {
2541                 switch (nc->wire_protocol) {
2542                 case DRBD_PROT_C:
2543                         dp_flags |= DP_SEND_WRITE_ACK;
2544                         break;
2545                 case DRBD_PROT_B:
2546                         dp_flags |= DP_SEND_RECEIVE_ACK;
2547                         break;
2548                 }
2549         }
2550         rcu_read_unlock();
2551
2552         if (dp_flags & DP_SEND_WRITE_ACK) {
2553                 peer_req->flags |= EE_SEND_WRITE_ACK;
2554                 inc_unacked(device);
2555                 /* corresponding dec_unacked() in e_end_block(),
2556                  * or in _drbd_clear_done_ee() */
2557         }
2558
2559         if (dp_flags & DP_SEND_RECEIVE_ACK) {
2560                 /* I really don't like it that the receiver thread
2561                  * sends on the msock, but anyways */
2562                 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2563         }
2564
2565         if (tp) {
2566                 /* two primaries implies protocol C */
2567                 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2568                 peer_req->flags |= EE_IN_INTERVAL_TREE;
2569                 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2570                 if (err)
2571                         goto out_interrupted;
2572                 spin_lock_irq(&device->resource->req_lock);
2573                 err = handle_write_conflicts(device, peer_req);
2574                 if (err) {
2575                         spin_unlock_irq(&device->resource->req_lock);
2576                         if (err == -ENOENT) {
2577                                 put_ldev(device);
2578                                 return 0;
2579                         }
2580                         goto out_interrupted;
2581                 }
2582         } else {
2583                 update_peer_seq(peer_device, peer_seq);
2584                 spin_lock_irq(&device->resource->req_lock);
2585         }
2586         /* TRIM and WRITE_SAME are processed synchronously: in
2587          * drbd_submit_peer_request() we wait for all pending requests,
2588          * that is, for active_ee to become empty;
2589          * better not add ourselves here. */
2590         if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0)
2591                 list_add_tail(&peer_req->w.list, &device->active_ee);
2592         spin_unlock_irq(&device->resource->req_lock);
2593
2594         if (device->state.conn == C_SYNC_TARGET)
2595                 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2596
2597         if (device->state.pdsk < D_INCONSISTENT) {
2598                 /* In case we have the only disk of the cluster, mark the range out of sync */
2599                 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2600                 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2601                 drbd_al_begin_io(device, &peer_req->i);
2602                 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2603         }
2604
2605         err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2606                                        DRBD_FAULT_DT_WR);
2607         if (!err)
2608                 return 0;
2609
2610         /* don't care for the reason here */
2611         drbd_err(device, "submit failed, triggering re-connect\n");
2612         spin_lock_irq(&device->resource->req_lock);
2613         list_del(&peer_req->w.list);
2614         drbd_remove_epoch_entry_interval(device, peer_req);
2615         spin_unlock_irq(&device->resource->req_lock);
2616         if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2617                 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
2618                 drbd_al_complete_io(device, &peer_req->i);
2619         }
2620
2621 out_interrupted:
2622         drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
2623         put_ldev(device);
2624         drbd_free_peer_req(device, peer_req);
2625         return err;
2626 }
2627
2628 /* We may throttle resync, if the lower device seems to be busy,
2629  * and current sync rate is above c_min_rate.
2630  *
2631  * To decide whether or not the lower device is busy, we use a scheme similar
2632  * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
2633  * activity (more than 64 sectors) that we cannot account for with our own
2634  * resync activity, the device obviously is "busy".
2635  *
2636  * The current sync rate used here uses only the most recent two step marks,
2637  * to have a short time average so we can react faster.
2638  */
2639 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2640                 bool throttle_if_app_is_waiting)
2641 {
2642         struct lc_element *tmp;
2643         bool throttle = drbd_rs_c_min_rate_throttle(device);
2644
2645         if (!throttle || throttle_if_app_is_waiting)
2646                 return throttle;
2647
2648         spin_lock_irq(&device->al_lock);
2649         tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2650         if (tmp) {
2651                 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2652                 if (test_bit(BME_PRIORITY, &bm_ext->flags))
2653                         throttle = false;
2654                 /* Do not slow down if app IO is already waiting for this extent,
2655                  * and our progress is necessary for application IO to complete. */
2656         }
2657         spin_unlock_irq(&device->al_lock);
2658
2659         return throttle;
2660 }
2661
2662 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2663 {
2664         struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
2665         unsigned long db, dt, dbdt;
2666         unsigned int c_min_rate;
2667         int curr_events;
2668
2669         rcu_read_lock();
2670         c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2671         rcu_read_unlock();
2672
2673         /* feature disabled? */
2674         if (c_min_rate == 0)
2675                 return false;
2676
2677         curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
2678                         atomic_read(&device->rs_sect_ev);
2679
2680         if (atomic_read(&device->ap_actlog_cnt)
2681             || curr_events - device->rs_last_events > 64) {
2682                 unsigned long rs_left;
2683                 int i;
2684
2685                 device->rs_last_events = curr_events;
2686
2687                 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2688                  * approx. */
2689                 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2690
2691                 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2692                         rs_left = device->ov_left;
2693                 else
2694                         rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2695
2696                 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2697                 if (!dt)
2698                         dt++;
2699                 db = device->rs_mark_left[i] - rs_left;
2700                 dbdt = Bit2KB(db/dt);
2701
2702                 if (dbdt > c_min_rate)
2703                         return true;
2704         }
2705         return false;
2706 }
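
/*
 * Worked example for the short-time average above (made-up numbers,
 * 4 KiB per bitmap bit): with the reference mark set 3 seconds ago
 * (dt == 3) and 1536 bitmap bits cleared since then (db == 1536),
 * dbdt == Bit2KB(1536 / 3) == 512 << 2 == 2048 KB/s; resync gets
 * throttled iff that exceeds the configured c_min_rate.
 */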
2707
2708 static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2709 {
2710         struct drbd_peer_device *peer_device;
2711         struct drbd_device *device;
2712         sector_t sector;
2713         sector_t capacity;
2714         struct drbd_peer_request *peer_req;
2715         struct digest_info *di = NULL;
2716         int size, verb;
2717         unsigned int fault_type;
2718         struct p_block_req *p = pi->data;
2719
2720         peer_device = conn_peer_device(connection, pi->vnr);
2721         if (!peer_device)
2722                 return -EIO;
2723         device = peer_device->device;
2724         capacity = drbd_get_capacity(device->this_bdev);
2725
2726         sector = be64_to_cpu(p->sector);
2727         size   = be32_to_cpu(p->blksize);
2728
2729         if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2730                 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2731                                 (unsigned long long)sector, size);
2732                 return -EINVAL;
2733         }
2734         if (sector + (size>>9) > capacity) {
2735                 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2736                                 (unsigned long long)sector, size);
2737                 return -EINVAL;
2738         }
2739
2740         if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2741                 verb = 1;
2742                 switch (pi->cmd) {
2743                 case P_DATA_REQUEST:
2744                         drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
2745                         break;
2746                 case P_RS_THIN_REQ:
2747                 case P_RS_DATA_REQUEST:
2748                 case P_CSUM_RS_REQUEST:
2749                 case P_OV_REQUEST:
2750                         drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY, p);
2751                         break;
2752                 case P_OV_REPLY:
2753                         verb = 0;
2754                         dec_rs_pending(device);
2755                         drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2756                         break;
2757                 default:
2758                         BUG();
2759                 }
2760                 if (verb && __ratelimit(&drbd_ratelimit_state))
2761                         drbd_err(device, "Cannot satisfy peer's read request, "
2762                             "no local data.\n");
2763
2764                 /* drain the payload, if any */
2765                 return drbd_drain_block(peer_device, pi->size);
2766         }
2767
2768         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2769          * "criss-cross" setup, that might cause write-out on some other DRBD,
2770          * which in turn might block on the other node at this very place.  */
2771         peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2772                         size, GFP_NOIO);
2773         if (!peer_req) {
2774                 put_ldev(device);
2775                 return -ENOMEM;
2776         }
2777
2778         switch (pi->cmd) {
2779         case P_DATA_REQUEST:
2780                 peer_req->w.cb = w_e_end_data_req;
2781                 fault_type = DRBD_FAULT_DT_RD;
2782                 /* application IO, don't drbd_rs_begin_io */
2783                 peer_req->flags |= EE_APPLICATION;
2784                 goto submit;
2785
2786         case P_RS_THIN_REQ:
2787                 /* If at some point in the future we have a smart way to
2788                    find out if this data block is completely deallocated,
2789                    then we would do something smarter here than reading
2790                    the block... */
2791                 peer_req->flags |= EE_RS_THIN_REQ;
2792                 /* fall through */
2793         case P_RS_DATA_REQUEST:
2794                 peer_req->w.cb = w_e_end_rsdata_req;
2795                 fault_type = DRBD_FAULT_RS_RD;
2796                 /* used in the sector offset progress display */
2797                 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2798                 break;
2799
2800         case P_OV_REPLY:
2801         case P_CSUM_RS_REQUEST:
2802                 fault_type = DRBD_FAULT_RS_RD;
2803                 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2804                 if (!di)
2805                         goto out_free_e;
2806
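                     /* the digest payload lives in the same allocation,
                      * immediately behind struct digest_info */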
2807                 di->digest_size = pi->size;
2808                 di->digest = (((char *)di)+sizeof(struct digest_info));
2809
2810                 peer_req->digest = di;
2811                 peer_req->flags |= EE_HAS_DIGEST;
2812
2813                 if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
2814                         goto out_free_e;
2815
2816                 if (pi->cmd == P_CSUM_RS_REQUEST) {
2817                         D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2818                         peer_req->w.cb = w_e_end_csum_rs_req;
2819                         /* used in the sector offset progress display */
2820                         device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2821                         /* remember to report stats in drbd_resync_finished */
2822                         device->use_csums = true;
2823                 } else if (pi->cmd == P_OV_REPLY) {
2824                         /* track progress, we may need to throttle */
2825                         atomic_add(size >> 9, &device->rs_sect_in);
2826                         peer_req->w.cb = w_e_end_ov_reply;
2827                         dec_rs_pending(device);
2828                         /* drbd_rs_begin_io done when we sent this request,
2829                          * but accounting still needs to be done. */
2830                         goto submit_for_resync;
2831                 }
2832                 break;
2833
2834         case P_OV_REQUEST:
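                     /* first request of an online verify run (start sector
                      * still unset): initialize position and progress marks */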
2835                 if (device->ov_start_sector == ~(sector_t)0 &&
2836                     peer_device->connection->agreed_pro_version >= 90) {
2837                         unsigned long now = jiffies;
2838                         int i;
2839                         device->ov_start_sector = sector;
2840                         device->ov_position = sector;
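                             /* bits (one per bitmap block) left to verify,
                              * from this sector to the end of the device */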
2841                         device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2842                         device->rs_total = device->ov_left;
2843                         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2844                                 device->rs_mark_left[i] = device->ov_left;
2845                                 device->rs_mark_time[i] = now;
2846                         }
2847                         drbd_info(device, "Online Verify start sector: %llu\n",
2848                                         (unsigned long long)sector);
2849                 }
2850                 peer_req->w.cb = w_e_end_ov_req;
2851                 fault_type = DRBD_FAULT_RS_RD;
2852                 break;
2853
2854         default:
2855                 BUG();
2856         }
2857
2858         /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2859          * wrt the receiver, but it is not as straightforward as it may seem.
2860          * Various places in the resync start and stop logic assume resync
2861          * requests are processed in order; requeuing this on the worker thread
2862          * would introduce a bunch of new code to synchronize the threads.
2863          *
2864          * Unlimited throttling before drbd_rs_begin_io may stall the resync
2865          * "forever", throttling after drbd_rs_begin_io will lock that extent
2866          * for application writes for the same time.  For now, just throttle
2867          * here, where the rest of the code expects the receiver to sleep for
2868          * a while anyway.
2869          */
2870
2871         /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2872          * this defers syncer requests for some time, before letting at least
2873          * one request through.  The resync controller on the receiving side
2874          * will adapt to the incoming rate accordingly.
2875          *
2876          * We cannot throttle here if remote is Primary/SyncTarget:
2877          * we would also throttle its application reads.
2878          * In that case, throttling is done on the SyncTarget only.
2879          */
2880
2881         /* Even though this may be a resync request, we do add to "read_ee";
2882          * "sync_ee" is only used for resync WRITEs.
2883          * Add to list early, so debugfs can find this request
2884          * even if we have to sleep below. */
2885         spin_lock_irq(&device->resource->req_lock);
2886         list_add_tail(&peer_req->w.list, &device->read_ee);
2887         spin_unlock_irq(&device->resource->req_lock);
2888
2889         update_receiver_timing_details(connection, drbd_rs_should_slow_down);
2890         if (device->state.peer != R_PRIMARY &&
2891             drbd_rs_should_slow_down(device, sector, false))
2892                 schedule_timeout_uninterruptible(HZ/10);
2893         update_receiver_timing_details(connection, drbd_rs_begin_io);
2894         if (drbd_rs_begin_io(device, sector))
2895                 goto out_free_e;
2896
2897 submit_for_resync:
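             /* account these sectors as resync I/O, so the throttling logic
              * can tell resync activity apart from application I/O */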
2898         atomic_add(size >> 9, &device->rs_sect_ev);
2899
2900 submit:
2901         update_receiver_timing_details(connection, drbd_submit_peer_request);
2902         inc_unacked(device);
2903         if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
2904                                      fault_type) == 0)
2905                 return 0;
2906
2907         /* don't care about the reason here */
2908         drbd_err(device, "submit failed, triggering re-connect\n");
2909
2910 out_free_e:
2911         spin_lock_irq(&device->resource->req_lock);
2912         list_del(&peer_req->w.list);
2913         spin_unlock_irq(&device->resource->req_lock);
2914         /* no drbd_rs_complete_io(), we are dropping the connection anyway */
2915
2916         put_ldev(device);
2917         drbd_free_peer_req(device, peer_req);
2918         return -EIO;
2919 }
2920
2921 /**
2922  * drbd_asb_recover_0p  -  Recover after split-brain with no remaining primaries
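      * @peer_device: DRBD peer device.
      *
      * Return: 1 to discard the peer's data (this node would become sync
      * source), -1 to discard the local data (this node would become sync
      * target), or -100 if no automatic recovery decision was reached.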
2923  */
2924 static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
2925 {
2926         struct drbd_device *device = peer_device->device;
2927         int self, peer, rv = -100;
2928         unsigned long ch_self, ch_peer;
2929         enum drbd_after_sb_p after_sb_0p;
2930
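             /* self/peer: low bit of each side's bitmap UUID, consulted by
              * the younger/older-primary strategies below; ch_self/ch_peer:
              * how many blocks each side changed while disconnected, used by
              * the zero-/least-changes strategies */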
2931         self = device->ldev->md.uuid[UI_BITMAP] & 1;
2932         peer = device->p_uuid[UI_BITMAP] & 1;
2933
2934         ch_peer = device->p_uuid[UI_SIZE];
2935         ch_self = device->comm_bm_set;
2936
2937         rcu_read_lock();
2938         after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
2939         rcu_read_unlock();
2940         switch (after_sb_0p) {
2941         case ASB_CONSENSUS:
2942         case ASB_DISCARD_SECONDARY:
2943         case ASB_CALL_HELPER:
2944         case ASB_VIOLENTLY:
2945                 drbd_err(device, "Configuration error.\n");
2946                 break;
2947         case ASB_DISCONNECT:
2948                 break;
2949         case ASB_DISCARD_YOUNGER_PRI:
2950                 if (self == 0 && peer == 1) {
2951                         rv = -1;
2952                         break;
2953                 }
2954                 if (self == 1 && peer == 0) {
2955                         rv =  1;
2956                         break;
2957                 }
2958                 /* Else fall through to one of the other strategies... */
2959         case ASB_DISCARD_OLDER_PRI:
2960                 if (self == 0 && peer == 1) {
2961                         rv = 1;
2962                         break;
2963                 }
2964                 if (self == 1 && peer == 0) {
2965                         rv = -1;
2966                         break;
2967                 }
2968                 /* Else fall through to one of the other strategies... */
2969                 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
2970                      "Using discard-least-changes instead\n");
2971                 /* fall through */
2972         case ASB_DISCARD_ZERO_CHG:
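                     /* overwrite whichever side made no changes; if neither
                      * side changed anything, fall back to the
                      * RESOLVE_CONFLICTS tie-breaker */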
2973                 if (ch_peer == 0 && ch_self == 0) {
2974                         rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
2975                                 ? -1 : 1;
2976                         break;
2977                 } else {
2978                         if (ch_peer == 0) { rv =  1; break; }
2979                         if (ch_self == 0) { rv = -1; break; }
2980                 }
2981                 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2982                         break;
2983                 /* else: fall through */
2984         case ASB_DISCARD_LEAST_CHG:
2985                 if      (ch_self < ch_peer)
2986                         rv = -1;
2987                 else if (ch_self > ch_peer)
2988                         rv =  1;
2989                 else /* ( ch_self == ch_peer ) */
2990                      /* Well, then use something else. */
2991                         rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
2992                                 ? -1 : 1;
2993                 break;
2994         case ASB_DISCARD_LOCAL:
2995                 rv = -1;
2996                 break;
2997         case ASB_DISCARD_REMOTE:
2998                 rv =  1;
2999         }
3000
3001         return rv;
3002 }
3003
3004 /**
3005  * drbd_asb_recover_1p  -  Recover after split-brain with one remaining primary
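      * @peer_device: DRBD peer device.
      *
      * Return: same convention as drbd_asb_recover_0p().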
3006  */
3007 static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
3008 {
3009         struct drbd_device *device = peer_device->device;
3010         int hg, rv = -100;
3011         enum drbd_after_sb_p after_sb_1p;
3012
3013         rcu_read_lock();
3014         after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
3015         rcu_read_unlock();
3016         switch (after_sb_1p) {
3017         case ASB_DISCARD_YOUNGER_PRI:
3018         case ASB_DISCARD_OLDER_PRI:
3019         case ASB_DISCARD_LEAST_CHG:
3020         case ASB_DISCARD_LOCAL:
3021         case ASB_DISCARD_REMOTE:
3022         case ASB_DISCARD_ZERO_CHG:
3023                 drbd_err(device, "Configuration error.\n");
3024                 break;
3025         case ASB_DISCONNECT:
3026                 break;
3027         case ASB_CONSENSUS:
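                     /* adopt the after-sb-0pri decision only when it matches
                      * the local role: a primary may only win, a secondary
                      * may only lose */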
3028                 hg = drbd_asb_recover_0p(peer_device);
3029                 if (hg == -1 && device->state.role == R_SECONDARY)
3030                         rv = hg;
3031                 if (hg == 1  && device->state.role == R_PRIMARY)
3032                         rv = hg;
3033                 break;
3034         case ASB_VIOLENTLY:
3035                 rv = drbd_asb_recover_0p(peer_device);
3036                 break;
3037         case ASB_DISCARD_SECONDARY:
3038                 return device->state.role == R_PRIMARY ? 1 : -1;
3039         case ASB_CALL_HELPER:
3040                 hg = drbd_asb_recover_0p(peer_device);
3041                 if (hg == -1 && device->state.role == R_PRIMARY) {
3042                         enum drbd_state_rv rv2;
3043
3044                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE;
3045                           * we might be here in C_WF_REPORT_PARAMS, which is transient.
3046                           * We do not need to wait for the after-state-change work either. */
3047                         rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3048                         if (rv2 != SS_SUCCESS) {
3049                                 drbd_khelper(device, "pri-lost-after-sb");
3050                         } else {
3051                                 drbd_warn(device, "Successfully gave up primary role.\n");
3052                                 rv = hg;
3053                         }
3054                 } else
3055                         rv = hg;
3056         }
3057
3058         return rv;
3059 }
3060
3061 /**
3062  * drbd_asb_recover_2p  -  Recover after split-brain with two remaining primaries
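      * @peer_device: DRBD peer device.
      *
      * Return: same convention as drbd_asb_recover_0p().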
3063  */
3064 static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
3065 {
3066         struct drbd_device *device = peer_device->device;
3067         int hg, rv = -100;