/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct flush_work {
        struct drbd_work w;
        struct drbd_epoch *epoch;
};

enum finish_epoch {
        FE_STILL_LIVE,
        FE_DESTROYED,
        FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
        struct drbd_epoch *prev;
        spin_lock(&mdev->epoch_lock);
        prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
        if (prev == epoch || prev == mdev->current_epoch)
                prev = NULL;
        spin_unlock(&mdev->epoch_lock);
        return prev;
}

#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
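
/* Note on GFP_TRY: it contains neither __GFP_WAIT nor __GFP_IO, so a
 * failing allocation returns NULL immediately instead of entering reclaim
 * and possibly write-out; see the "criss-cross" comment in
 * drbd_pp_first_pages_or_try_alloc() below. */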

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

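/* For orientation, the chain accessors used below live in drbd_int.h.
 * Roughly (a sketch, not the verbatim definitions):
 *
 *      #define page_chain_next(page) \
 *              ((struct page *)page_private(page))
 *      #define page_chain_for_each(page) \
 *              for (; page; page = page_chain_next(page))
 */
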
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
        struct page *page;
        struct page *tmp;

        BUG_ON(!n);
        BUG_ON(!head);

        page = *head;

        if (!page)
                return NULL;

        while (page) {
                tmp = page_chain_next(page);
                if (--n == 0)
                        break; /* found sufficient pages */
                if (tmp == NULL)
                        /* insufficient pages, don't use any of them. */
                        return NULL;
                page = tmp;
        }

        /* add end of list marker for the returned list */
        set_page_private(page, 0);
        /* actual return value, and adjustment of head */
        page = *head;
        *head = tmp;
        return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
        struct page *tmp;
        int i = 1;
        while ((tmp = page_chain_next(page)))
                ++i, page = tmp;
        if (len)
                *len = i;
        return page;
}

static int page_chain_free(struct page *page)
{
        struct page *tmp;
        int i = 0;
        page_chain_for_each_safe(page, tmp) {
                put_page(page);
                ++i;
        }
        return i;
}

static void page_chain_add(struct page **head,
                struct page *chain_first, struct page *chain_last)
{
#if 1
        struct page *tmp;
        tmp = page_chain_tail(chain_first, NULL);
        BUG_ON(tmp != chain_last);
#endif

        /* add chain to head */
        set_page_private(chain_last, (unsigned long)*head);
        *head = chain_first;
}

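/* Usage sketch for the page_chain_tail()/page_chain_add() pair, as seen
 * in drbd_pp_free() and drbd_pp_first_pages_or_try_alloc() below:
 *
 *      tmp = page_chain_tail(chain, &i);       // outside the lock
 *      spin_lock(&drbd_pp_lock);
 *      page_chain_add(&drbd_pp_pool, chain, tmp);
 *      drbd_pp_vacant += i;
 *      spin_unlock(&drbd_pp_lock);
 */
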
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
        struct page *page = NULL;
        struct page *tmp = NULL;
        int i = 0;

        /* Yes, testing drbd_pp_vacant outside the lock is racy.
         * So what. It saves a spin_lock. */
        if (drbd_pp_vacant >= number) {
                spin_lock(&drbd_pp_lock);
                page = page_chain_del(&drbd_pp_pool, number);
                if (page)
                        drbd_pp_vacant -= number;
                spin_unlock(&drbd_pp_lock);
                if (page)
                        return page;
        }

        /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place.  */
        for (i = 0; i < number; i++) {
                tmp = alloc_page(GFP_TRY);
                if (!tmp)
                        break;
                set_page_private(tmp, (unsigned long)page);
                page = tmp;
        }

        if (i == number)
                return page;

        /* Not enough pages immediately available this time.
         * No need to jump around here, drbd_pp_alloc will retry this
         * function "soon". */
        if (page) {
                tmp = page_chain_tail(page, NULL);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        return NULL;
}

/* kick lower level device, if we have more than (arbitrary number)
 * reference counts on it, which typically are locally submitted io
 * requests.  don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
        if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
                drbd_kick_lo(mdev);
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
        struct drbd_epoch_entry *e;
        struct list_head *le, *tle;

        /* The EEs are always appended to the end of the list. Since
           they are sent in order over the wire, they have to finish
           in order. As soon as we see the first unfinished one, we
           can stop examining the list... */

        list_for_each_safe(le, tle, &mdev->net_ee) {
                e = list_entry(le, struct drbd_epoch_entry, w.list);
                if (drbd_ee_has_active_page(e))
                        break;
                list_move(le, to_be_freed);
        }
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
        LIST_HEAD(reclaimed);
        struct drbd_epoch_entry *e, *t;

        maybe_kick_lo(mdev);
        spin_lock_irq(&mdev->req_lock);
        reclaim_net_ee(mdev, &reclaimed);
        spin_unlock_irq(&mdev->req_lock);

        list_for_each_entry_safe(e, t, &reclaimed, w.list)
                drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:       DRBD device.
 * @number:     number of pages requested
 * @retry:      whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
        struct page *page = NULL;
        DEFINE_WAIT(wait);

        /* Yes, we may run up to @number over max_buffers. If we
         * follow it strictly, the admin will get it wrong anyways. */
        if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
                page = drbd_pp_first_pages_or_try_alloc(mdev, number);

        while (page == NULL) {
                prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

                drbd_kick_lo_and_reclaim_net(mdev);

                if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
                        page = drbd_pp_first_pages_or_try_alloc(mdev, number);
                        if (page)
                                break;
                }

                if (!retry)
                        break;

                if (signal_pending(current)) {
                        dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
                        break;
                }

                schedule();
        }
        finish_wait(&drbd_pp_wait, &wait);

        if (page)
                atomic_add(number, &mdev->pp_in_use);
        return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
        int i;

        if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
                i = page_chain_free(page);
        else {
                struct page *tmp;
                tmp = page_chain_tail(page, &i);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        i = atomic_sub_return(i, a);
        if (i < 0)
                dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
                        is_net ? "pp_in_use_by_net" : "pp_in_use", i);
        wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

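/* In caller terms, using the wait helpers defined later in this file:
 *
 *      drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 *              takes and releases the req_lock itself, while
 *
 *      spin_lock_irq(&mdev->req_lock);
 *      _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 *      spin_unlock_irq(&mdev->req_lock);
 *
 *              is the pattern for the underscore variant. */
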
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
                                     u64 id,
                                     sector_t sector,
                                     unsigned int data_size,
                                     gfp_t gfp_mask) __must_hold(local)
{
        struct drbd_epoch_entry *e;
        struct page *page;
        unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
                return NULL;

        e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
        if (!e) {
                if (!(gfp_mask & __GFP_NOWARN))
                        dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
                return NULL;
        }

        page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
        if (!page)
                goto fail;

        INIT_HLIST_NODE(&e->colision);
        e->epoch = NULL;
        e->mdev = mdev;
        e->pages = page;
        atomic_set(&e->pending_bios, 0);
        e->size = data_size;
        e->flags = 0;
        e->sector = sector;
        e->block_id = id;

        return e;

 fail:
        mempool_free(e, drbd_ee_mempool);
        return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
        if (e->flags & EE_HAS_DIGEST)
                kfree(e->digest);
        drbd_pp_free(mdev, e->pages, is_net);
        D_ASSERT(atomic_read(&e->pending_bios) == 0);
        D_ASSERT(hlist_unhashed(&e->colision));
        mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
        LIST_HEAD(work_list);
        struct drbd_epoch_entry *e, *t;
        int count = 0;
        int is_net = list == &mdev->net_ee;

        spin_lock_irq(&mdev->req_lock);
        list_splice_init(list, &work_list);
        spin_unlock_irq(&mdev->req_lock);

        list_for_each_entry_safe(e, t, &work_list, w.list) {
                drbd_free_some_ee(mdev, e, is_net);
                count++;
        }
        return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
        LIST_HEAD(work_list);
        LIST_HEAD(reclaimed);
        struct drbd_epoch_entry *e, *t;
        int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

        spin_lock_irq(&mdev->req_lock);
        reclaim_net_ee(mdev, &reclaimed);
        list_splice_init(&mdev->done_ee, &work_list);
        spin_unlock_irq(&mdev->req_lock);

        list_for_each_entry_safe(e, t, &reclaimed, w.list)
                drbd_free_net_ee(mdev, e);

        /* possible callbacks here:
         * e_end_block, e_end_resync_block, and e_send_discard_ack.
         * All ignore the last argument.
         */
        list_for_each_entry_safe(e, t, &work_list, w.list) {
                /* list_del not necessary, next/prev members not touched */
                ok = e->w.cb(mdev, &e->w, !ok) && ok;
                drbd_free_ee(mdev, e);
        }
        wake_up(&mdev->ee_wait);

        return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
        DEFINE_WAIT(wait);

        /* avoids spin_lock/unlock
         * and calling prepare_to_wait in the fast path */
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mdev->req_lock);
                drbd_kick_lo(mdev);
                schedule();
                finish_wait(&mdev->ee_wait, &wait);
                spin_lock_irq(&mdev->req_lock);
        }
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
        spin_lock_irq(&mdev->req_lock);
        _drbd_wait_ee_list_empty(mdev, head);
        spin_unlock_irq(&mdev->req_lock);
}

/* See also kernel_accept(), which is only present since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
                struct socket *sock, struct socket **newsock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        *what = "listen";
        err = sock->ops->listen(sock, 5);
        if (err < 0)
                goto out;

        *what = "sock_create_lite";
        err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
                               newsock);
        if (err < 0)
                goto out;

        *what = "accept";
        err = sock->ops->accept(sock, *newsock, 0);
        if (err < 0) {
                sock_release(*newsock);
                *newsock = NULL;
                goto out;
        }
        (*newsock)->ops  = sock->ops;

out:
        return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
                    void *buf, size_t size, int flags)
{
        mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
                .msg_iovlen = 1,
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };
        int rv;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
        set_fs(oldfs);

        return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
        mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
                .msg_iovlen = 1,
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
        };
        int rv;

        oldfs = get_fs();
        set_fs(KERNEL_DS);

        for (;;) {
                rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
                if (rv == size)
                        break;

                /* Note:
                 * ECONNRESET   other side closed the connection
                 * ERESTARTSYS  (on sock) we got a signal
                 */

                if (rv < 0) {
                        if (rv == -ECONNRESET)
                                dev_info(DEV, "sock was reset by peer\n");
                        else if (rv != -ERESTARTSYS)
                                dev_err(DEV, "sock_recvmsg returned %d\n", rv);
                        break;
                } else if (rv == 0) {
                        dev_info(DEV, "sock was shut down by peer\n");
                        break;
                } else {
                        /* signal came in, or peer/link went down,
                         * after we read a partial message
                         */
                        /* D_ASSERT(signal_pending(current)); */
                        break;
                }
        }

        set_fs(oldfs);

        if (rv != size)
                drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

        return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
                unsigned int rcv)
{
        /* open coded SO_SNDBUF, SO_RCVBUF */
        if (snd) {
                sock->sk->sk_sndbuf = snd;
                sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rcv) {
                sock->sk->sk_rcvbuf = rcv;
                sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
        const char *what;
        struct socket *sock;
        struct sockaddr_in6 src_in6;
        int err;
        int disconnect_on_error = 1;

        if (!get_net_conf(mdev))
                return NULL;

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
                SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err < 0) {
                sock = NULL;
                goto out;
        }

        sock->sk->sk_rcvtimeo =
        sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
        drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
                        mdev->net_conf->rcvbuf_size);

        /* explicitly bind to the configured IP as source IP
         * for the outgoing connections.
         * This is needed for multihomed hosts and to be
         * able to use lo: interfaces for drbd.
         * Make sure to use 0 as port number, so linux selects
         * a free one dynamically.
         */
        memcpy(&src_in6, mdev->net_conf->my_addr,
               min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
        if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

        what = "bind before connect";
        err = sock->ops->bind(sock,
                              (struct sockaddr *) &src_in6,
                              mdev->net_conf->my_addr_len);
        if (err < 0)
                goto out;

        /* connect may fail, peer not yet available.
         * stay C_WF_CONNECTION, don't go Disconnecting! */
        disconnect_on_error = 0;
        what = "connect";
        err = sock->ops->connect(sock,
                                 (struct sockaddr *)mdev->net_conf->peer_addr,
                                 mdev->net_conf->peer_addr_len, 0);

out:
        if (err < 0) {
                if (sock) {
                        sock_release(sock);
                        sock = NULL;
                }
                switch (-err) {
                        /* timeout, busy, signal pending */
                case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
                case EINTR: case ERESTARTSYS:
                        /* peer not (yet) available, network problem */
                case ECONNREFUSED: case ENETUNREACH:
                case EHOSTDOWN:    case EHOSTUNREACH:
                        disconnect_on_error = 0;
                        break;
                default:
                        dev_err(DEV, "%s failed, err = %d\n", what, err);
                }
                if (disconnect_on_error)
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        }
        put_net_conf(mdev);
        return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
        int timeo, err;
        struct socket *s_estab = NULL, *s_listen;
        const char *what;

        if (!get_net_conf(mdev))
                return NULL;

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
                SOCK_STREAM, IPPROTO_TCP, &s_listen);
        if (err) {
                s_listen = NULL;
                goto out;
        }

        timeo = mdev->net_conf->try_connect_int * HZ;
        timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

        s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
        s_listen->sk->sk_rcvtimeo = timeo;
        s_listen->sk->sk_sndtimeo = timeo;
        drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
                        mdev->net_conf->rcvbuf_size);

        what = "bind before listen";
        err = s_listen->ops->bind(s_listen,
                              (struct sockaddr *) mdev->net_conf->my_addr,
                              mdev->net_conf->my_addr_len);
        if (err < 0)
                goto out;

        err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
        if (s_listen)
                sock_release(s_listen);
        if (err < 0) {
                if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                        dev_err(DEV, "%s failed, err = %d\n", what, err);
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                }
        }
        put_net_conf(mdev);

        return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
        struct socket *sock, enum drbd_packets cmd)
{
        struct p_header80 *h = &mdev->data.sbuf.header.h80;

        return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
        struct p_header80 *h = &mdev->data.rbuf.header.h80;
        int rr;

        rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

        if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
                return be16_to_cpu(h->command);

        return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:       DRBD device.
 * @sock:       pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
        int rr;
        char tb[4];

        if (!*sock)
                return FALSE;

        rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

        if (rr > 0 || rr == -EAGAIN) {
                return TRUE;
        } else {
                sock_release(*sock);
                *sock = NULL;
                return FALSE;
        }
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
        struct socket *s, *sock, *msock;
        int try, h, ok;

        D_ASSERT(!mdev->data.socket);

        if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
                return -2;

        clear_bit(DISCARD_CONCURRENT, &mdev->flags);

        sock  = NULL;
        msock = NULL;

        do {
                for (try = 0;;) {
                        /* 3 tries, this should take less than a second! */
                        s = drbd_try_connect(mdev);
                        if (s || ++try >= 3)
                                break;
                        /* give the other side time to call bind() & listen() */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(HZ / 10);
                }

                if (s) {
                        if (!sock) {
                                drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
                                sock = s;
                                s = NULL;
                        } else if (!msock) {
                                drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
                                msock = s;
                                s = NULL;
                        } else {
                                dev_err(DEV, "Logic error in drbd_connect()\n");
                                goto out_release_sockets;
                        }
                }

                if (sock && msock) {
                        __set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(HZ / 10);
                        ok = drbd_socket_okay(mdev, &sock);
                        ok = drbd_socket_okay(mdev, &msock) && ok;
                        if (ok)
                                break;
                }

retry:
                s = drbd_wait_for_connect(mdev);
                if (s) {
                        try = drbd_recv_fp(mdev, s);
                        drbd_socket_okay(mdev, &sock);
                        drbd_socket_okay(mdev, &msock);
                        switch (try) {
                        case P_HAND_SHAKE_S:
                                if (sock) {
                                        dev_warn(DEV, "initial packet S crossed\n");
                                        sock_release(sock);
                                }
                                sock = s;
                                break;
                        case P_HAND_SHAKE_M:
                                if (msock) {
                                        dev_warn(DEV, "initial packet M crossed\n");
                                        sock_release(msock);
                                }
                                msock = s;
                                set_bit(DISCARD_CONCURRENT, &mdev->flags);
                                break;
                        default:
                                dev_warn(DEV, "Error receiving initial packet\n");
                                sock_release(s);
                                if (random32() & 1)
                                        goto retry;
                        }
                }

                if (mdev->state.conn <= C_DISCONNECTING)
                        goto out_release_sockets;
                if (signal_pending(current)) {
                        flush_signals(current);
                        smp_rmb();
                        if (get_t_state(&mdev->receiver) == Exiting)
                                goto out_release_sockets;
                }

                if (sock && msock) {
                        ok = drbd_socket_okay(mdev, &sock);
                        ok = drbd_socket_okay(mdev, &msock) && ok;
                        if (ok)
                                break;
                }
        } while (1);

        msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
        sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

        sock->sk->sk_allocation = GFP_NOIO;
        msock->sk->sk_allocation = GFP_NOIO;

        sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
        msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

        /* NOT YET ...
         * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
         * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_HAND_SHAKE timeout,
         * which we set to 4x the configured ping_timeout. */
        sock->sk->sk_sndtimeo =
        sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

        msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
        msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
        drbd_tcp_nodelay(sock);
        drbd_tcp_nodelay(msock);

        mdev->data.socket = sock;
        mdev->meta.socket = msock;
        mdev->last_received = jiffies;

        D_ASSERT(mdev->asender.task == NULL);

        h = drbd_do_handshake(mdev);
        if (h <= 0)
                return h;

        if (mdev->cram_hmac_tfm) {
                /* drbd_request_state(mdev, NS(conn, WFAuth)); */
                switch (drbd_do_auth(mdev)) {
                case -1:
                        dev_err(DEV, "Authentication of peer failed\n");
                        return -1;
                case 0:
                        dev_err(DEV, "Authentication of peer failed, trying again.\n");
                        return 0;
                }
        }

        if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
                return 0;

        sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
        sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

        atomic_set(&mdev->packet_seq, 0);
        mdev->peer_seq = 0;

        drbd_thread_start(&mdev->asender);

        if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
                drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
                put_ldev(mdev);
        }

        if (!drbd_send_protocol(mdev))
                return -1;
        drbd_send_sync_param(mdev, &mdev->sync_conf);
        drbd_send_sizes(mdev, 0, 0);
        drbd_send_uuids(mdev);
        drbd_send_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);

        return 1;

out_release_sockets:
        if (sock)
                sock_release(sock);
        if (msock)
                sock_release(msock);
        return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
        union p_header *h = &mdev->data.rbuf.header;
        int r;

        r = drbd_recv(mdev, h, sizeof(*h));
        if (unlikely(r != sizeof(*h))) {
                dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
                return FALSE;
        }

        if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
                *cmd = be16_to_cpu(h->h80.command);
                *packet_size = be16_to_cpu(h->h80.length);
        } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
                *cmd = be16_to_cpu(h->h95.command);
                *packet_size = be32_to_cpu(h->h95.length);
        } else {
                dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
                    be32_to_cpu(h->h80.magic),
                    be16_to_cpu(h->h80.command),
                    be16_to_cpu(h->h80.length));
                return FALSE;
        }
        mdev->last_received = jiffies;

        return TRUE;
}

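/* Two header layouts are accepted above: the original h80 header with a
 * 16 bit length field, and the h95 header (BE_DRBD_MAGIC_BIG) with a
 * 32 bit length. Compare the agreed_pro_version < 95 check in
 * drbd_connect(), which caps the request size at DRBD_MAX_SIZE_H80_PACKET
 * for peers that only speak the old header format. */
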
static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
        int rv;

        if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
                rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
                                        NULL);
                if (rv) {
                        dev_err(DEV, "local disk flush failed with status %d\n", rv);
                        /* would rather check on EOPNOTSUPP, but that is not reliable.
                         * don't try again for ANY return value != 0
                         * if (rv == -EOPNOTSUPP) */
                        drbd_bump_write_ordering(mdev, WO_drain_io);
                }
                put_ldev(mdev);
        }

        return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}

static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct flush_work *fw = (struct flush_work *)w;
        struct drbd_epoch *epoch = fw->epoch;

        kfree(w);

        if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
                drbd_flush_after_epoch(mdev, epoch);

        drbd_may_finish_epoch(mdev, epoch, EV_PUT |
                              (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));

        return 1;
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:       DRBD device.
 * @epoch:      Epoch object.
 * @ev:         Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)
{
        int finish, epoch_size;
        struct drbd_epoch *next_epoch;
        int schedule_flush = 0;
        enum finish_epoch rv = FE_STILL_LIVE;

        spin_lock(&mdev->epoch_lock);
        do {
                next_epoch = NULL;
                finish = 0;

                epoch_size = atomic_read(&epoch->epoch_size);

                switch (ev & ~EV_CLEANUP) {
                case EV_PUT:
                        atomic_dec(&epoch->active);
                        break;
                case EV_GOT_BARRIER_NR:
                        set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

                        /* Special case: If we just switched from WO_bio_barrier to
                           WO_bdev_flush we should not finish the current epoch */
                        if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
                            mdev->write_ordering != WO_bio_barrier &&
                            epoch == mdev->current_epoch)
                                clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
                        break;
                case EV_BARRIER_DONE:
                        set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
                        break;
                case EV_BECAME_LAST:
                        /* nothing to do */
                        break;
                }

                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
                    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
                    epoch->list.prev == &mdev->current_epoch->list &&
                    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
                        /* Nearly all conditions are met to finish that epoch... */
                        if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
                            mdev->write_ordering == WO_none ||
                            (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
                            ev & EV_CLEANUP) {
                                finish = 1;
                                set_bit(DE_IS_FINISHING, &epoch->flags);
                        } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
                                 mdev->write_ordering == WO_bio_barrier) {
                                atomic_inc(&epoch->active);
                                schedule_flush = 1;
                        }
                }
                if (finish) {
                        if (!(ev & EV_CLEANUP)) {
                                spin_unlock(&mdev->epoch_lock);
                                drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
                                spin_lock(&mdev->epoch_lock);
                        }
                        dec_unacked(mdev);

                        if (mdev->current_epoch != epoch) {
                                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                                list_del(&epoch->list);
                                ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
                                mdev->epochs--;
                                kfree(epoch);

                                if (rv == FE_STILL_LIVE)
                                        rv = FE_DESTROYED;
                        } else {
                                epoch->flags = 0;
                                atomic_set(&epoch->epoch_size, 0);
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
                        }
                }

                if (!next_epoch)
                        break;

                epoch = next_epoch;
        } while (1);

        spin_unlock(&mdev->epoch_lock);

        if (schedule_flush) {
                struct flush_work *fw;
                fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
                if (fw) {
                        fw->w.cb = w_flush;
                        fw->epoch = epoch;
                        drbd_queue_work(&mdev->data.work, &fw->w);
                } else {
                        dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
                        set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
                        /* This is not unbounded recursion; it goes only one level deep */
                        drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
                        drbd_may_finish_epoch(mdev, epoch, EV_PUT);
                }
        }

        return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:       DRBD device.
 * @wo:         Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
        enum write_ordering_e pwo;
        static char *write_ordering_str[] = {
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
                [WO_bio_barrier] = "barrier",
        };

        pwo = mdev->write_ordering;
        wo = min(pwo, wo);
        if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
                wo = WO_bdev_flush;
        if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
                wo = WO_drain_io;
        if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
                wo = WO_none;
        mdev->write_ordering = wo;
        if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
                dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

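/* The fallback chain is one-directional: barrier -> flush -> drain -> none;
 * min(pwo, wo) above makes sure we never upgrade again. Example: with
 * "barrier" configured but no_disk_barrier set on the backing device, we
 * end up with "flush"; should a flush then fail, drbd_flush_after_epoch()
 * bumps us further down to "drain". */
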
/**
 * drbd_submit_ee() - Submit the bio(s) for an epoch entry
 * @mdev:       DRBD device.
 * @e:          epoch entry
 * @rw:         flag field, see bio->bi_rw
 * @fault_type: type tag for fault injection
 *
 * Returns 0 on success, -ENOMEM if a bio could not be allocated.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
                const unsigned rw, const int fault_type)
{
        struct bio *bios = NULL;
        struct bio *bio;
        struct page *page = e->pages;
        sector_t sector = e->sector;
        unsigned ds = e->size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
         * request in more than one bio. */
next_bio:
        bio = bio_alloc(GFP_NOIO, nr_pages);
        if (!bio) {
                dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
                goto fail;
        }
        /* > e->sector, unless this is the first bio */
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        /* we special case some flags in the multi-bio case, see below
         * (REQ_UNPLUG, REQ_HARDBARRIER) */
        bio->bi_rw = rw;
        bio->bi_private = e;
        bio->bi_end_io = drbd_endio_sec;

        bio->bi_next = bios;
        bios = bio;
        ++n_bios;

        page_chain_for_each(page) {
                unsigned len = min_t(unsigned, ds, PAGE_SIZE);
                if (!bio_add_page(bio, page, len, 0)) {
                        /* a single page must always be possible! */
                        BUG_ON(bio->bi_vcnt == 0);
                        goto next_bio;
                }
                ds -= len;
                sector += len >> 9;
                --nr_pages;
        }
        D_ASSERT(page == NULL);
        D_ASSERT(ds == 0);

        atomic_set(&e->pending_bios, n_bios);
        do {
                bio = bios;
                bios = bios->bi_next;
                bio->bi_next = NULL;

                /* strip off REQ_UNPLUG unless it is the last bio */
                if (bios)
                        bio->bi_rw &= ~REQ_UNPLUG;

                drbd_generic_make_request(mdev, fault_type, bio);

                /* strip off REQ_HARDBARRIER,
                 * unless it is the first or last bio */
                if (bios && bios->bi_next)
                        bios->bi_rw &= ~REQ_HARDBARRIER;
        } while (bios);
        maybe_kick_lo(mdev);
        return 0;

fail:
        while (bios) {
                bio = bios;
                bios = bios->bi_next;
                bio_put(bio);
        }
        return -ENOMEM;
}

/**
 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
 * @mdev:       DRBD device.
 * @w:          work object.
 * @cancel:     The connection will be closed anyways (unused in this callback)
 */
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
        struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
        /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
           (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
           so that we can finish that epoch in drbd_may_finish_epoch().
           That is necessary if we already have a long chain of Epochs, before
           we realize that REQ_HARDBARRIER is actually not supported */

        /* As long as the -ENOTSUPP on the barrier is reported immediately
           that will never trigger. If it is reported late, we will just
           print that warning and continue correctly for all future requests
           with WO_bdev_flush */
        if (previous_epoch(mdev, e->epoch))
                dev_warn(DEV, "Write ordering was not enforced (one time event)\n");

        /* we still have a local reference,
         * get_ldev was done in receive_Data. */

        e->w.cb = e_end_block;
        if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
                /* drbd_submit_ee fails for one reason only:
                 * it was not able to allocate sufficient bios.
                 * requeue, try again later. */
                e->w.cb = w_e_reissue;
                drbd_queue_work(&mdev->data.work, &e->w);
        }
        return 1;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
        int rv, issue_flush;
        struct p_barrier *p = &mdev->data.rbuf.barrier;
        struct drbd_epoch *epoch;

        inc_unacked(mdev);

        if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
                drbd_kick_lo(mdev);

        mdev->current_epoch->barrier_nr = p->barrier;
        rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

        /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
         * the activity log, which means it would not be resynced in case the
         * R_PRIMARY crashes now.
         * Therefore we must send the barrier_ack after the barrier request was
         * completed. */
        switch (mdev->write_ordering) {
        case WO_bio_barrier:
        case WO_none:
                if (rv == FE_RECYCLED)
                        return TRUE;
                break;

        case WO_bdev_flush:
        case WO_drain_io:
                if (rv == FE_STILL_LIVE) {
                        set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
                        drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
                        rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
                }
                if (rv == FE_RECYCLED)
                        return TRUE;

                /* The asender will send all the ACKs and barrier ACKs out, since
                   all EEs moved from the active_ee to the done_ee. We need to
                   provide a new epoch object for the EEs that come in soon */
                break;
        }

        /* receiver context, in the writeout path of the other node.
         * avoid potential distributed deadlock */
        epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
        if (!epoch) {
                dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
                issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
                drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
                if (issue_flush) {
                        rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
                        if (rv == FE_RECYCLED)
                                return TRUE;
                }

                drbd_wait_ee_list_empty(mdev, &mdev->done_ee);

                return TRUE;
        }

        epoch->flags = 0;
        atomic_set(&epoch->epoch_size, 0);
        atomic_set(&epoch->active, 0);

        spin_lock(&mdev->epoch_lock);
        if (atomic_read(&mdev->current_epoch->epoch_size)) {
                list_add(&epoch->list, &mdev->current_epoch->list);
                mdev->current_epoch = epoch;
                mdev->epochs++;
        } else {
                /* The current_epoch got recycled while we allocated this one... */
                kfree(epoch);
        }
        spin_unlock(&mdev->epoch_lock);

        return TRUE;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
        struct drbd_epoch_entry *e;
        struct page *page;
        int dgs, ds, rr;
        void *dig_in = mdev->int_dig_in;
        void *dig_vv = mdev->int_dig_vv;
        unsigned long *data;

        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
                crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

        if (dgs) {
                rr = drbd_recv(mdev, dig_in, dgs);
                if (rr != dgs) {
                        dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
                             rr, dgs);
                        return NULL;
                }
        }

        data_size -= dgs;

        ERR_IF(data_size & 0x1ff) return NULL;
        ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;

        /* even though we trust our peer,
         * we sometimes have to double check. */
        if (sector + (data_size>>9) > capacity) {
                dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
                        (unsigned long long)capacity,
                        (unsigned long long)sector, data_size);
                return NULL;
        }

        /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place.  */
        e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
        if (!e)
                return NULL;

        ds = data_size;
        page = e->pages;
        page_chain_for_each(page) {
                unsigned len = min_t(int, ds, PAGE_SIZE);
                data = kmap(page);
                rr = drbd_recv(mdev, data, len);
                if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
                        dev_err(DEV, "Fault injection: Corrupting data on receive\n");
                        data[0] = data[0] ^ (unsigned long)-1;
                }
                kunmap(page);
                if (rr != len) {
                        drbd_free_ee(mdev, e);
                        dev_warn(DEV, "short read receiving data: read %d expected %d\n",
                             rr, len);
                        return NULL;
                }
                ds -= rr;
        }

        if (dgs) {
                drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
                if (memcmp(dig_in, dig_vv, dgs)) {
                        dev_err(DEV, "Digest integrity check FAILED.\n");
                        drbd_bcast_ee(mdev, "digest failed",
                                        dgs, dig_in, dig_vv, e);
                        drbd_free_ee(mdev, e);
                        return NULL;
                }
        }
        mdev->recv_cnt += data_size>>9;
        return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
        struct page *page;
        int rr, rv = 1;
        void *data;

        if (!data_size)
                return TRUE;

        page = drbd_pp_alloc(mdev, 1, 1);

        data = kmap(page);
        while (data_size) {
                rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
                if (rr != min_t(int, data_size, PAGE_SIZE)) {
                        rv = 0;
                        dev_warn(DEV, "short read receiving data: read %d expected %d\n",
                             rr, min_t(int, data_size, PAGE_SIZE));
                        break;
                }
                data_size -= rr;
        }
        kunmap(page);
        drbd_pp_free(mdev, page, 0);
        return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                           sector_t sector, int data_size)
{
        struct bio_vec *bvec;
        struct bio *bio;
        int dgs, rr, i, expect;
        void *dig_in = mdev->int_dig_in;
        void *dig_vv = mdev->int_dig_vv;

        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
                crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

        if (dgs) {
                rr = drbd_recv(mdev, dig_in, dgs);
                if (rr != dgs) {
                        dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
                             rr, dgs);
                        return 0;
                }
        }

        data_size -= dgs;

        /* optimistically update recv_cnt.  if receiving fails below,
         * we disconnect anyways, and counters will be reset. */
        mdev->recv_cnt += data_size>>9;

        bio = req->master_bio;
        D_ASSERT(sector == bio->bi_sector);

        bio_for_each_segment(bvec, bio, i) {
                expect = min_t(int, data_size, bvec->bv_len);
                rr = drbd_recv(mdev,
                             kmap(bvec->bv_page)+bvec->bv_offset,
                             expect);
                kunmap(bvec->bv_page);
                if (rr != expect) {
                        dev_warn(DEV, "short read receiving data reply: "
                             "read %d expected %d\n",
                             rr, expect);
                        return 0;
                }
                data_size -= rr;
        }

        if (dgs) {
                drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
                if (memcmp(dig_in, dig_vv, dgs)) {
                        dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
                        return 0;
                }
        }

        D_ASSERT(data_size == 0);
        return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
        sector_t sector = e->sector;
        int ok;

        D_ASSERT(hlist_unhashed(&e->colision));

        if (likely((e->flags & EE_WAS_ERROR) == 0)) {
                drbd_set_in_sync(mdev, sector, e->size);
                ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
        } else {
                /* Record failure to sync */
                drbd_rs_failed_io(mdev, sector, e->size);

                ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
        }
        dec_unacked(mdev);

        return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
        struct drbd_epoch_entry *e;
1555
1556         e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1557         if (!e)
1558                 goto fail;
1559
1560         dec_rs_pending(mdev);
1561
1562         inc_unacked(mdev);
1563         /* corresponding dec_unacked() in e_end_resync_block()
1564          * respective _drbd_clear_done_ee */
1565
1566         e->w.cb = e_end_resync_block;
1567
1568         spin_lock_irq(&mdev->req_lock);
1569         list_add(&e->w.list, &mdev->sync_ee);
1570         spin_unlock_irq(&mdev->req_lock);
1571
1572         atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1573         if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1574                 return TRUE;
1575
1576         /* drbd_submit_ee currently fails for one reason only:
1577          * not being able to allocate enough bios.
1578          * Is dropping the connection going to help? */
1579         spin_lock_irq(&mdev->req_lock);
1580         list_del(&e->w.list);
1581         spin_unlock_irq(&mdev->req_lock);
1582
1583         drbd_free_ee(mdev, e);
1584 fail:
1585         put_ldev(mdev);
1586         return FALSE;
1587 }
1588
1589 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1590 {
1591         struct drbd_request *req;
1592         sector_t sector;
1593         int ok;
1594         struct p_data *p = &mdev->data.rbuf.data;
1595
1596         sector = be64_to_cpu(p->sector);
1597
1598         spin_lock_irq(&mdev->req_lock);
1599         req = _ar_id_to_req(mdev, p->block_id, sector);
1600         spin_unlock_irq(&mdev->req_lock);
1601         if (unlikely(!req)) {
1602                 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1603                 return FALSE;
1604         }
1605
1606         /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1607          * special casing it there for the various failure cases.
1608          * still no race with drbd_fail_pending_reads */
1609         ok = recv_dless_read(mdev, req, sector, data_size);
1610
1611         if (ok)
1612                 req_mod(req, data_received);
1613         /* else: nothing. handled from drbd_disconnect...
1614          * I don't think we may complete this just yet
1615          * in case we are "on-disconnect: freeze" */
1616
1617         return ok;
1618 }
1619
1620 static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1621 {
1622         sector_t sector;
1623         int ok;
1624         struct p_data *p = &mdev->data.rbuf.data;
1625
1626         sector = be64_to_cpu(p->sector);
1627         D_ASSERT(p->block_id == ID_SYNCER);
1628
1629         if (get_ldev(mdev)) {
1630                 /* data is submitted to disk within recv_resync_read.
1631                  * corresponding put_ldev done below on error,
1632                  * or in drbd_endio_write_sec. */
1633                 ok = recv_resync_read(mdev, sector, data_size);
1634         } else {
1635                 if (__ratelimit(&drbd_ratelimit_state))
1636                         dev_err(DEV, "Can not write resync data to local disk.\n");
1637
1638                 ok = drbd_drain_block(mdev, data_size);
1639
1640                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1641         }
1642
1643         atomic_add(data_size >> 9, &mdev->rs_sect_in);
1644
1645         return ok;
1646 }
1647
1648 /* e_end_block() is called via drbd_process_done_ee().
1649  * this means this function only runs in the asender thread
1650  */
1651 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1652 {
1653         struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1654         sector_t sector = e->sector;
1655         struct drbd_epoch *epoch;
1656         int ok = 1, pcmd;
1657
1658         if (e->flags & EE_IS_BARRIER) {
1659                 epoch = previous_epoch(mdev, e->epoch);
1660                 if (epoch)
1661                         drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1662         }
1663
1664         if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1665                 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1666                         pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1667                                 mdev->state.conn <= C_PAUSED_SYNC_T &&
1668                                 e->flags & EE_MAY_SET_IN_SYNC) ?
1669                                 P_RS_WRITE_ACK : P_WRITE_ACK;
1670                         ok &= drbd_send_ack(mdev, pcmd, e);
1671                         if (pcmd == P_RS_WRITE_ACK)
1672                                 drbd_set_in_sync(mdev, sector, e->size);
1673                 } else {
1674                         ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1675                         /* we expect it to be marked out of sync anyways...
1676                          * maybe assert this?  */
1677                 }
1678                 dec_unacked(mdev);
1679         }
1680         /* we delete from the conflict detection hash _after_ we sent out the
1681          * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1682         if (mdev->net_conf->two_primaries) {
1683                 spin_lock_irq(&mdev->req_lock);
1684                 D_ASSERT(!hlist_unhashed(&e->colision));
1685                 hlist_del_init(&e->colision);
1686                 spin_unlock_irq(&mdev->req_lock);
1687         } else {
1688                 D_ASSERT(hlist_unhashed(&e->colision));
1689         }
1690
1691         drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1692
1693         return ok;
1694 }
1695
1696 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1697 {
1698         struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1699         int ok = 1;
1700
1701         D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1702         ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1703
1704         spin_lock_irq(&mdev->req_lock);
1705         D_ASSERT(!hlist_unhashed(&e->colision));
1706         hlist_del_init(&e->colision);
1707         spin_unlock_irq(&mdev->req_lock);
1708
1709         dec_unacked(mdev);
1710
1711         return ok;
1712 }
1713
1714 /* Called from receive_Data.
1715  * Synchronize packets on sock with packets on msock.
1716  *
1717  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1718  * packet traveling on msock, they are still processed in the order they have
1719  * been sent.
1720  *
1721  * Note: we don't care for Ack packets overtaking P_DATA packets.
1722  *
1723  * In case packet_seq is larger than mdev->peer_seq, there are
1724  * outstanding packets on the msock. We wait for them to arrive.
1725  * In case we are the logically next packet, we update mdev->peer_seq
1726  * ourselves. Correctly handles 32bit wrap around.
1727  *
1728  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1729  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1730  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1731  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1732  *
1733  * returns 0 if we may process the packet,
1734  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1735 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1736 {
1737         DEFINE_WAIT(wait);
1738         unsigned int p_seq;
1739         long timeout;
1740         int ret = 0;
1741         spin_lock(&mdev->peer_seq_lock);
1742         for (;;) {
1743                 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1744                 if (seq_le(packet_seq, mdev->peer_seq+1))
1745                         break;
1746                 if (signal_pending(current)) {
1747                         ret = -ERESTARTSYS;
1748                         break;
1749                 }
1750                 p_seq = mdev->peer_seq;
1751                 spin_unlock(&mdev->peer_seq_lock);
1752                 timeout = schedule_timeout(30*HZ);
1753                 spin_lock(&mdev->peer_seq_lock);
1754                 if (timeout == 0 && p_seq == mdev->peer_seq) {
1755                         ret = -ETIMEDOUT;
1756                         dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1757                         break;
1758                 }
1759         }
1760         finish_wait(&mdev->seq_wait, &wait);
1761         if (mdev->peer_seq+1 == packet_seq)
1762                 mdev->peer_seq++;
1763         spin_unlock(&mdev->peer_seq_lock);
1764         return ret;
1765 }
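
/* Editor's sketch (illustration only, not used by the driver): one
 * wrap-around safe "less or equal" on 32bit sequence numbers, matching
 * the semantics the comment above relies on.  The name seq_le_sketch
 * is hypothetical; drbd_wait_peer_seq() uses the driver's own seq_le(). */
static inline int seq_le_sketch(u32 a, u32 b)
{
	/* correct across the 32bit wrap as long as the two counters
	 * are less than 1<<31 apart */
	return (s32)(a - b) <= 0;
}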
1766
1767 static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1768 {
1769         if (mdev->agreed_pro_version >= 95)
1770                 return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1771                         (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1772                         (dpf & DP_FUA ? REQ_FUA : 0) |
1773                         (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1774                         (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1775         else
1776                 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
1777 }
1778
1779 /* mirrored write */
1780 static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1781 {
1782         sector_t sector;
1783         struct drbd_epoch_entry *e;
1784         struct p_data *p = &mdev->data.rbuf.data;
1785         int rw = WRITE;
1786         u32 dp_flags;
1787
1788         if (!get_ldev(mdev)) {
1789                 if (__ratelimit(&drbd_ratelimit_state))
1790                         dev_err(DEV, "Can not write mirrored data block "
1791                             "to local disk.\n");
1792                 spin_lock(&mdev->peer_seq_lock);
1793                 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1794                         mdev->peer_seq++;
1795                 spin_unlock(&mdev->peer_seq_lock);
1796
1797                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1798                 atomic_inc(&mdev->current_epoch->epoch_size);
1799                 return drbd_drain_block(mdev, data_size);
1800         }
1801
1802         /* get_ldev(mdev) successful.
1803          * Corresponding put_ldev done either below (on various errors),
1804          * or in drbd_endio_write_sec, if we successfully submit the data at
1805          * the end of this function. */
1806
1807         sector = be64_to_cpu(p->sector);
1808         e = read_in_block(mdev, p->block_id, sector, data_size);
1809         if (!e) {
1810                 put_ldev(mdev);
1811                 return FALSE;
1812         }
1813
1814         e->w.cb = e_end_block;
1815
1816         spin_lock(&mdev->epoch_lock);
1817         e->epoch = mdev->current_epoch;
1818         atomic_inc(&e->epoch->epoch_size);
1819         atomic_inc(&e->epoch->active);
1820
1821         if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1822                 struct drbd_epoch *epoch;
1823                 /* Issue a barrier if we start a new epoch, and the previous epoch
1824                    was not a epoch containing a single request which already was
1825                    a Barrier. */
1826                 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1827                 if (epoch == e->epoch) {
1828                         set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1829                         rw |= REQ_HARDBARRIER;
1830                         e->flags |= EE_IS_BARRIER;
1831                 } else {
1832                         if (atomic_read(&epoch->epoch_size) > 1 ||
1833                             !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1834                                 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1835                                 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1836                                 rw |= REQ_HARDBARRIER;
1837                                 e->flags |= EE_IS_BARRIER;
1838                         }
1839                 }
1840         }
1841         spin_unlock(&mdev->epoch_lock);
1842
1843         dp_flags = be32_to_cpu(p->dp_flags);
1844         rw |= write_flags_to_bio(mdev, dp_flags);
1845
1846         if (dp_flags & DP_MAY_SET_IN_SYNC)
1847                 e->flags |= EE_MAY_SET_IN_SYNC;
1848
1849         /* I'm the receiver, I do hold a net_cnt reference. */
1850         if (!mdev->net_conf->two_primaries) {
1851                 spin_lock_irq(&mdev->req_lock);
1852         } else {
1853                 /* don't get the req_lock yet,
1854                  * we may sleep in drbd_wait_peer_seq */
1855                 const int size = e->size;
1856                 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1857                 DEFINE_WAIT(wait);
1858                 struct drbd_request *i;
1859                 struct hlist_node *n;
1860                 struct hlist_head *slot;
1861                 int first;
1862
1863                 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1864                 BUG_ON(mdev->ee_hash == NULL);
1865                 BUG_ON(mdev->tl_hash == NULL);
1866
1867                 /* conflict detection and handling:
1868                  * 1. wait on the sequence number,
1869                  *    in case this data packet overtook ACK packets.
1870                  * 2. check our hash tables for conflicting requests.
1871                  *    we only need to walk the tl_hash, since an ee cannot
1872                  *    have a conflict with another ee: on the submitting
1873                  *    node, the corresponding req had already been conflicting,
1874                  *    and a conflicting req is never sent.
1875                  *
1876                  * Note: for two_primaries, we are protocol C,
1877                  * so there cannot be any request that is DONE
1878                  * but still on the transfer log.
1879                  *
1880                  * unconditionally add to the ee_hash.
1881                  *
1882                  * if no conflicting request is found:
1883                  *    submit.
1884                  *
1885                  * if any conflicting request is found
1886                  * that has not yet been acked,
1887                  * AND I have the "discard concurrent writes" flag:
1888                  *       queue (via done_ee) the P_DISCARD_ACK; OUT.
1889                  *
1890                  * if any conflicting request is found:
1891                  *       block the receiver, waiting on misc_wait
1892                  *       until no more conflicting requests are there,
1893                  *       or we get interrupted (disconnect).
1894                  *
1895                  *       we do not just write after local io completion of those
1896                  *       requests, but only after req is done completely, i.e.
1897                  *       we wait for the P_DISCARD_ACK to arrive!
1898                  *
1899                  *       then proceed normally, i.e. submit.
1900                  */
1901                 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1902                         goto out_interrupted;
1903
1904                 spin_lock_irq(&mdev->req_lock);
1905
1906                 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1907
1908 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1909                 slot = tl_hash_slot(mdev, sector);
1910                 first = 1;
1911                 for (;;) {
1912                         int have_unacked = 0;
1913                         int have_conflict = 0;
1914                         prepare_to_wait(&mdev->misc_wait, &wait,
1915                                 TASK_INTERRUPTIBLE);
1916                         hlist_for_each_entry(i, n, slot, colision) {
1917                                 if (OVERLAPS) {
1918                                         /* only ALERT on first iteration,
1919                                          * we may be woken up early... */
1920                                         if (first)
1921                                                 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1922                                                       " new: %llus +%u; pending: %llus +%u\n",
1923                                                       current->comm, current->pid,
1924                                                       (unsigned long long)sector, size,
1925                                                       (unsigned long long)i->sector, i->size);
1926                                         if (i->rq_state & RQ_NET_PENDING)
1927                                                 ++have_unacked;
1928                                         ++have_conflict;
1929                                 }
1930                         }
1931 #undef OVERLAPS
1932                         if (!have_conflict)
1933                                 break;
1934
1935                         /* Discard Ack only for the _first_ iteration */
1936                         if (first && discard && have_unacked) {
1937                                 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1938                                      (unsigned long long)sector);
1939                                 inc_unacked(mdev);
1940                                 e->w.cb = e_send_discard_ack;
1941                                 list_add_tail(&e->w.list, &mdev->done_ee);
1942
1943                                 spin_unlock_irq(&mdev->req_lock);
1944
1945                                 /* we could probably send that P_DISCARD_ACK ourselves,
1946                                  * but I don't like the receiver using the msock */
1947
1948                                 put_ldev(mdev);
1949                                 wake_asender(mdev);
1950                                 finish_wait(&mdev->misc_wait, &wait);
1951                                 return TRUE;
1952                         }
1953
1954                         if (signal_pending(current)) {
1955                                 hlist_del_init(&e->colision);
1956
1957                                 spin_unlock_irq(&mdev->req_lock);
1958
1959                                 finish_wait(&mdev->misc_wait, &wait);
1960                                 goto out_interrupted;
1961                         }
1962
1963                         spin_unlock_irq(&mdev->req_lock);
1964                         if (first) {
1965                                 first = 0;
1966                                 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1967                                      "sec=%llus\n", (unsigned long long)sector);
1968                         } else if (discard) {
1969                                 /* we had none on the first iteration.
1970                                  * there must be none now. */
1971                                 D_ASSERT(have_unacked == 0);
1972                         }
1973                         schedule();
1974                         spin_lock_irq(&mdev->req_lock);
1975                 }
1976                 finish_wait(&mdev->misc_wait, &wait);
1977         }
1978
1979         list_add(&e->w.list, &mdev->active_ee);
1980         spin_unlock_irq(&mdev->req_lock);
1981
1982         switch (mdev->net_conf->wire_protocol) {
1983         case DRBD_PROT_C:
1984                 inc_unacked(mdev);
1985                 /* corresponding dec_unacked() in e_end_block()
1986                  * respective _drbd_clear_done_ee */
1987                 break;
1988         case DRBD_PROT_B:
1989                 /* I really don't like it that the receiver thread
1990                  * sends on the msock, but anyways */
1991                 drbd_send_ack(mdev, P_RECV_ACK, e);
1992                 break;
1993         case DRBD_PROT_A:
1994                 /* nothing to do */
1995                 break;
1996         }
1997
1998         if (mdev->state.pdsk == D_DISKLESS) {
1999                 /* In case we have the only disk of the cluster: note the block out of sync for the diskless peer */
2000                 drbd_set_out_of_sync(mdev, e->sector, e->size);
2001                 e->flags |= EE_CALL_AL_COMPLETE_IO;
2002                 drbd_al_begin_io(mdev, e->sector);
2003         }
2004
2005         if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2006                 return TRUE;
2007
2008         /* drbd_submit_ee currently fails for one reason only:
2009          * not being able to allocate enough bios.
2010          * Is dropping the connection going to help? */
2011         spin_lock_irq(&mdev->req_lock);
2012         list_del(&e->w.list);
2013         hlist_del_init(&e->colision);
2014         spin_unlock_irq(&mdev->req_lock);
2015         if (e->flags & EE_CALL_AL_COMPLETE_IO)
2016                 drbd_al_complete_io(mdev, e->sector);
2017
2018 out_interrupted:
2019         /* yes, the epoch_size now is imbalanced.
2020          * but we drop the connection anyways, so we don't have a chance to
2021          * receive a barrier... atomic_inc(&mdev->epoch_size); */
2022         put_ldev(mdev);
2023         drbd_free_ee(mdev, e);
2024         return FALSE;
2025 }
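
/* Editor's sketch (illustration only): the OVERLAPS test in
 * receive_Data above boils down to interval intersection on sector
 * ranges.  sectors_overlap_sketch is a hypothetical stand-in for the
 * driver's overlaps() macro; sizes are byte counts, sectors 512 bytes. */
static inline int sectors_overlap_sketch(sector_t s1, int size1,
					 sector_t s2, int size2)
{
	/* two ranges intersect iff each starts before the other ends */
	return s1 < s2 + (size2 >> 9) && s2 < s1 + (size1 >> 9);
}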
2026
2027 /* We may throttle resync, if the lower device seems to be busy,
2028  * and current sync rate is above c_min_rate.
2029  *
2030  * To decide whether or not the lower device is busy, we use a scheme similar
2031  * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2032  * (more than 64 sectors) of activity we cannot account for with our own resync
2033  * activity, it obviously is "busy".
2034  *
2035  * The current sync rate used here uses only the most recent two step marks,
2036  * to have a short time average so we can react faster.
2037  */
2038 int drbd_rs_should_slow_down(struct drbd_conf *mdev)
2039 {
2040         struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2041         unsigned long db, dt, dbdt;
2042         int curr_events;
2043         int throttle = 0;
2044
2045         /* feature disabled? */
2046         if (mdev->sync_conf.c_min_rate == 0)
2047                 return 0;
2048
2049         curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2050                       (int)part_stat_read(&disk->part0, sectors[1]) -
2051                         atomic_read(&mdev->rs_sect_ev);
2052         if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2053                 unsigned long rs_left;
2054                 int i;
2055
2056                 mdev->rs_last_events = curr_events;
2057
2058                 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2059                  * approx. */
2060                 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
2061                 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2062
2063                 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2064                 if (!dt)
2065                         dt++;
2066                 db = mdev->rs_mark_left[i] - rs_left;
2067                 dbdt = Bit2KB(db/dt);
2068
2069                 if (dbdt > mdev->sync_conf.c_min_rate)
2070                         throttle = 1;
2071         }
2072         return throttle;
2073 }
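
/* Editor's worked example for the throttle arithmetic above (numbers
 * are hypothetical): with DRBD's 4KiB bitmap granularity, suppose the
 * two most recent sync marks are dt = 6 seconds apart and db = 15000
 * bits were cleared in between.  Then db/dt = 2500 bits/s, and
 * dbdt = Bit2KB(2500) = 10000 KiB/s.  With c_min_rate set to, say,
 * 4096 KiB/s, dbdt > c_min_rate, so resync would be throttled while
 * the lower device shows activity we cannot account for. */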
2074
2075
2076 static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
2077 {
2078         sector_t sector;
2079         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2080         struct drbd_epoch_entry *e;
2081         struct digest_info *di = NULL;
2082         int size, verb;
2083         unsigned int fault_type;
2084         struct p_block_req *p = &mdev->data.rbuf.block_req;
2085
2086         sector = be64_to_cpu(p->sector);
2087         size   = be32_to_cpu(p->blksize);
2088
2089         if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2090                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2091                                 (unsigned long long)sector, size);
2092                 return FALSE;
2093         }
2094         if (sector + (size>>9) > capacity) {
2095                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2096                                 (unsigned long long)sector, size);
2097                 return FALSE;
2098         }
2099
2100         if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2101                 verb = 1;
2102                 switch (cmd) {
2103                 case P_DATA_REQUEST:
2104                         drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2105                         break;
2106                 case P_RS_DATA_REQUEST:
2107                 case P_CSUM_RS_REQUEST:
2108                 case P_OV_REQUEST:
2109                         drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
2110                         break;
2111                 case P_OV_REPLY:
2112                         verb = 0;
2113                         dec_rs_pending(mdev);
2114                         drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2115                         break;
2116                 default:
2117                         dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2118                                 cmdname(cmd));
2119                 }
2120                 if (verb && __ratelimit(&drbd_ratelimit_state))
2121                         dev_err(DEV, "Can not satisfy peer's read request, "
2122                             "no local data.\n");
2123
2124                 /* drain the possibly present payload */
2125                 return drbd_drain_block(mdev, digest_size);
2126         }
2127
2128         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2129          * "criss-cross" setup, that might cause write-out on some other DRBD,
2130          * which in turn might block on the other node at this very place.  */
2131         e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2132         if (!e) {
2133                 put_ldev(mdev);
2134                 return FALSE;
2135         }
2136
2137         switch (cmd) {
2138         case P_DATA_REQUEST:
2139                 e->w.cb = w_e_end_data_req;
2140                 fault_type = DRBD_FAULT_DT_RD;
2141                 /* application IO, don't drbd_rs_begin_io */
2142                 goto submit;
2143
2144         case P_RS_DATA_REQUEST:
2145                 e->w.cb = w_e_end_rsdata_req;
2146                 fault_type = DRBD_FAULT_RS_RD;
2147                 break;
2148
2149         case P_OV_REPLY:
2150         case P_CSUM_RS_REQUEST:
2151                 fault_type = DRBD_FAULT_RS_RD;
2152                 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2153                 if (!di)
2154                         goto out_free_e;
2155
2156                 di->digest_size = digest_size;
2157                 di->digest = (((char *)di)+sizeof(struct digest_info));
2158
2159                 e->digest = di;
2160                 e->flags |= EE_HAS_DIGEST;
2161
2162                 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2163                         goto out_free_e;
2164
2165                 if (cmd == P_CSUM_RS_REQUEST) {
2166                         D_ASSERT(mdev->agreed_pro_version >= 89);
2167                         e->w.cb = w_e_end_csum_rs_req;
2168                 } else if (cmd == P_OV_REPLY) {
2169                         e->w.cb = w_e_end_ov_reply;
2170                         dec_rs_pending(mdev);
2171                         /* drbd_rs_begin_io done when we sent this request,
2172                          * but accounting still needs to be done. */
2173                         goto submit_for_resync;
2174                 }
2175                 break;
2176
2177         case P_OV_REQUEST:
2178                 if (mdev->ov_start_sector == ~(sector_t)0 &&
2179                     mdev->agreed_pro_version >= 90) {
2180                         mdev->ov_start_sector = sector;
2181                         mdev->ov_position = sector;
2182                         mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2183                         dev_info(DEV, "Online Verify start sector: %llu\n",
2184                                         (unsigned long long)sector);
2185                 }
2186                 e->w.cb = w_e_end_ov_req;
2187                 fault_type = DRBD_FAULT_RS_RD;
2188                 break;
2189
2190         default:
2191                 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2192                     cmdname(cmd));
2193                 fault_type = DRBD_FAULT_MAX;
2194                 goto out_free_e;
2195         }
2196
2197         /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2198          * wrt the receiver, but it is not as straightforward as it may seem.
2199          * Various places in the resync start and stop logic assume resync
2200          * requests are processed in order, requeuing this on the worker thread
2201          * introduces a bunch of new code for synchronization between threads.
2202          *
2203          * Unlimited throttling before drbd_rs_begin_io may stall the resync
2204          * "forever", throttling after drbd_rs_begin_io will lock that extent
2205          * for application writes for the same time.  For now, just throttle
2206          * here, where the rest of the code expects the receiver to sleep for
2207          * a while, anyways.
2208          */
2209
2210         /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2211          * this defers syncer requests for some time, before letting at least
2212          * one request through.  The resync controller on the receiving side
2213          * will adapt to the incoming rate accordingly.
2214          *
2215          * We cannot throttle here if remote is Primary/SyncTarget:
2216          * we would also throttle its application reads.
2217          * In that case, throttling is done on the SyncTarget only.
2218          */
2219         if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2220                 msleep(100);
2221         if (drbd_rs_begin_io(mdev, e->sector))
2222                 goto out_free_e;
2223
2224 submit_for_resync:
2225         atomic_add(size >> 9, &mdev->rs_sect_ev);
2226
2227 submit:
2228         inc_unacked(mdev);
2229         spin_lock_irq(&mdev->req_lock);
2230         list_add_tail(&e->w.list, &mdev->read_ee);
2231         spin_unlock_irq(&mdev->req_lock);
2232
2233         if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2234                 return TRUE;
2235
2236         /* drbd_submit_ee currently fails for one reason only:
2237          * not being able to allocate enough bios.
2238          * Is dropping the connection going to help? */
2239         spin_lock_irq(&mdev->req_lock);
2240         list_del(&e->w.list);
2241         spin_unlock_irq(&mdev->req_lock);
2242         /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2243
2244 out_free_e:
2245         put_ldev(mdev);
2246         drbd_free_ee(mdev, e);
2247         return FALSE;
2248 }
2249
2250 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2251 {
2252         int self, peer, rv = -100;
2253         unsigned long ch_self, ch_peer;
2254
2255         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2256         peer = mdev->p_uuid[UI_BITMAP] & 1;
2257
2258         ch_peer = mdev->p_uuid[UI_SIZE];
2259         ch_self = mdev->comm_bm_set;
2260
2261         switch (mdev->net_conf->after_sb_0p) {
2262         case ASB_CONSENSUS:
2263         case ASB_DISCARD_SECONDARY:
2264         case ASB_CALL_HELPER:
2265                 dev_err(DEV, "Configuration error.\n");
2266                 break;
2267         case ASB_DISCONNECT:
2268                 break;
2269         case ASB_DISCARD_YOUNGER_PRI:
2270                 if (self == 0 && peer == 1) {
2271                         rv = -1;
2272                         break;
2273                 }
2274                 if (self == 1 && peer == 0) {
2275                         rv =  1;
2276                         break;
2277                 }
2278                 /* Else fall through to one of the other strategies... */
2279         case ASB_DISCARD_OLDER_PRI:
2280                 if (self == 0 && peer == 1) {
2281                         rv = 1;
2282                         break;
2283                 }
2284                 if (self == 1 && peer == 0) {
2285                         rv = -1;
2286                         break;
2287                 }
2288                 /* Else fall through to one of the other strategies... */
2289                 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2290                      "Using discard-least-changes instead\n");
2291         case ASB_DISCARD_ZERO_CHG:
2292                 if (ch_peer == 0 && ch_self == 0) {
2293                         rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2294                                 ? -1 : 1;
2295                         break;
2296                 } else {
2297                         if (ch_peer == 0) { rv =  1; break; }
2298                         if (ch_self == 0) { rv = -1; break; }
2299                 }
2300                 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2301                         break;
2302         case ASB_DISCARD_LEAST_CHG:
2303                 if      (ch_self < ch_peer)
2304                         rv = -1;
2305                 else if (ch_self > ch_peer)
2306                         rv =  1;
2307                 else /* ( ch_self == ch_peer ) */
2308                      /* Well, then use something else. */
2309                         rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2310                                 ? -1 : 1;
2311                 break;
2312         case ASB_DISCARD_LOCAL:
2313                 rv = -1;
2314                 break;
2315         case ASB_DISCARD_REMOTE:
2316                 rv =  1;
2317         }
2318
2319         return rv;
2320 }
2321
2322 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2323 {
2324         int self, peer, hg, rv = -100;
2325
2326         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2327         peer = mdev->p_uuid[UI_BITMAP] & 1;
2328
2329         switch (mdev->net_conf->after_sb_1p) {
2330         case ASB_DISCARD_YOUNGER_PRI:
2331         case ASB_DISCARD_OLDER_PRI:
2332         case ASB_DISCARD_LEAST_CHG:
2333         case ASB_DISCARD_LOCAL:
2334         case ASB_DISCARD_REMOTE:
2335                 dev_err(DEV, "Configuration error.\n");
2336                 break;
2337         case ASB_DISCONNECT:
2338                 break;
2339         case ASB_CONSENSUS:
2340                 hg = drbd_asb_recover_0p(mdev);
2341                 if (hg == -1 && mdev->state.role == R_SECONDARY)
2342                         rv = hg;
2343                 if (hg == 1  && mdev->state.role == R_PRIMARY)
2344                         rv = hg;
2345                 break;
2346         case ASB_VIOLENTLY:
2347                 rv = drbd_asb_recover_0p(mdev);
2348                 break;
2349         case ASB_DISCARD_SECONDARY:
2350                 return mdev->state.role == R_PRIMARY ? 1 : -1;
2351         case ASB_CALL_HELPER:
2352                 hg = drbd_asb_recover_0p(mdev);
2353                 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2354                         drbd_set_role(mdev, R_SECONDARY, 0);
2355                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2356                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2357                           * we do not need to wait for the after state change work either. */
2358                         self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2359                         if (self != SS_SUCCESS) {
2360                                 drbd_khelper(mdev, "pri-lost-after-sb");
2361                         } else {
2362                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2363                                 rv = hg;
2364                         }
2365                 } else
2366                         rv = hg;
2367         }
2368
2369         return rv;
2370 }
2371
2372 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2373 {
2374         int self, peer, hg, rv = -100;
2375
2376         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2377         peer = mdev->p_uuid[UI_BITMAP] & 1;
2378
2379         switch (mdev->net_conf->after_sb_2p) {
2380         case ASB_DISCARD_YOUNGER_PRI:
2381         case ASB_DISCARD_OLDER_PRI:
2382         case ASB_DISCARD_LEAST_CHG:
2383         case ASB_DISCARD_LOCAL:
2384         case ASB_DISCARD_REMOTE:
2385         case ASB_CONSENSUS:
2386         case ASB_DISCARD_SECONDARY:
2387                 dev_err(DEV, "Configuration error.\n");
2388                 break;
2389         case ASB_VIOLENTLY:
2390                 rv = drbd_asb_recover_0p(mdev);
2391                 break;
2392         case ASB_DISCONNECT:
2393                 break;
2394         case ASB_CALL_HELPER:
2395                 hg = drbd_asb_recover_0p(mdev);
2396                 if (hg == -1) {
2397                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2398                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2399                           * we do not need to wait for the after state change work either. */
2400                         self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2401                         if (self != SS_SUCCESS) {
2402                                 drbd_khelper(mdev, "pri-lost-after-sb");
2403                         } else {
2404                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2405                                 rv = hg;
2406                         }
2407                 } else
2408                         rv = hg;
2409         }
2410
2411         return rv;
2412 }
2413
2414 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2415                            u64 bits, u64 flags)
2416 {
2417         if (!uuid) {
2418                 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2419                 return;
2420         }
2421         dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2422              text,
2423              (unsigned long long)uuid[UI_CURRENT],
2424              (unsigned long long)uuid[UI_BITMAP],
2425              (unsigned long long)uuid[UI_HISTORY_START],
2426              (unsigned long long)uuid[UI_HISTORY_END],
2427              (unsigned long long)bits,
2428              (unsigned long long)flags);
2429 }
2430
2431 /*
2432   100   after split brain try auto recover
2433     2   C_SYNC_SOURCE set BitMap
2434     1   C_SYNC_SOURCE use BitMap
2435     0   no Sync
2436    -1   C_SYNC_TARGET use BitMap
2437    -2   C_SYNC_TARGET set BitMap
2438  -100   after split brain, disconnect
2439 -1000   unrelated data
2440  */
2441 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2442 {
2443         u64 self, peer;
2444         int i, j;
2445
2446         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2447         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2448
2449         *rule_nr = 10;
2450         if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2451                 return 0;
2452
2453         *rule_nr = 20;
2454         if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2455              peer != UUID_JUST_CREATED)
2456                 return -2;
2457
2458         *rule_nr = 30;
2459         if (self != UUID_JUST_CREATED &&
2460             (peer == UUID_JUST_CREATED || peer == (u64)0))
2461                 return 2;
2462
2463         if (self == peer) {
2464                 int rct, dc; /* roles at crash time */
2465
2466                 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2467
2468                         if (mdev->agreed_pro_version < 91)
2469                                 return -1001;
2470
2471                         if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2472                             (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2473                                 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2474                                 drbd_uuid_set_bm(mdev, 0UL);
2475
2476                                 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2477                                                mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2478                                 *rule_nr = 34;
2479                         } else {
2480                                 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2481                                 *rule_nr = 36;
2482                         }
2483
2484                         return 1;
2485                 }
2486
2487                 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2488
2489                         if (mdev->agreed_pro_version < 91)
2490                                 return -1001;
2491
2492                         if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2493                             (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2494                                 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2495
2496                                 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2497                                 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2498                                 mdev->p_uuid[UI_BITMAP] = 0UL;
2499
2500                                 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2501                                 *rule_nr = 35;
2502                         } else {
2503                                 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2504                                 *rule_nr = 37;
2505                         }
2506
2507                         return -1;
2508                 }
2509
2510                 /* Common power [off|failure] */
2511                 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2512                         (mdev->p_uuid[UI_FLAGS] & 2);
2513                 /* lowest bit is set when we were primary,
2514                  * next bit (weight 2) is set when peer was primary */
2515                 *rule_nr = 40;
2516
2517                 switch (rct) {
2518                 case 0: /* !self_pri && !peer_pri */ return 0;
2519                 case 1: /*  self_pri && !peer_pri */ return 1;
2520                 case 2: /* !self_pri &&  peer_pri */ return -1;
2521                 case 3: /*  self_pri &&  peer_pri */
2522                         dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2523                         return dc ? -1 : 1;
2524                 }
2525         }
2526
2527         *rule_nr = 50;
2528         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2529         if (self == peer)
2530                 return -1;
2531
2532         *rule_nr = 51;
2533         peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2534         if (self == peer) {
2535                 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2536                 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2537                 if (self == peer) {
2538                         /* The last P_SYNC_UUID did not get through. Undo the peer's UUID
2539                            modifications from its last start of a resync as sync source. */
2540
2541                         if (mdev->agreed_pro_version < 91)
2542                                 return -1001;
2543
2544                         mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2545                         mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2546                         return -1;
2547                 }
2548         }
2549
2550         *rule_nr = 60;
2551         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2552         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2553                 peer = mdev->p_uuid[i] & ~((u64)1);
2554                 if (self == peer)
2555                         return -2;
2556         }
2557
2558         *rule_nr = 70;
2559         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2560         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2561         if (self == peer)
2562                 return 1;
2563
2564         *rule_nr = 71;
2565         self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2566         if (self == peer) {
2567                 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2568                 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2569                 if (self == peer) {
2570                         /* The last P_SYNC_UUID did not get through. Undo the modifications
2571                            to our UUIDs from the last start of a resync as sync source. */
2572
2573                         if (mdev->agreed_pro_version < 91)
2574                                 return -1001;
2575
2576                         _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2577                         _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2578
2579                         dev_info(DEV, "Undid last start of resync:\n");
2580
2581                         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2582                                        mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2583
2584                         return 1;
2585                 }
2586         }
2587
2588
2589         *rule_nr = 80;
2590         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2591         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2592                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2593                 if (self == peer)
2594                         return 2;
2595         }
2596
2597         *rule_nr = 90;
2598         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2599         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2600         if (self == peer && self != ((u64)0))
2601                 return 100;
2602
2603         *rule_nr = 100;
2604         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2605                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2606                 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2607                         peer = mdev->p_uuid[j] & ~((u64)1);
2608                         if (self == peer)
2609                                 return -100;
2610                 }
2611         }
2612
2613         return -1000;
2614 }
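
/* Editor's sketch (illustration only): throughout drbd_uuid_compare
 * the lowest bit of each UUID is treated as a flag and masked off
 * before comparing.  A hypothetical helper making that explicit: */
static inline int uuids_match_sketch(u64 a, u64 b)
{
	/* bit 0 carries flag information, not identity */
	return (a & ~(u64)1) == (b & ~(u64)1);
}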
2615
2616 /* drbd_sync_handshake() returns the new conn state on success, or
2617    C_MASK (-1) on failure.
2618  */
2619 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2620                                            enum drbd_disk_state peer_disk) __must_hold(local)
2621 {
2622         int hg, rule_nr;
2623         enum drbd_conns rv = C_MASK;
2624         enum drbd_disk_state mydisk;
2625
2626         mydisk = mdev->state.disk;
2627         if (mydisk == D_NEGOTIATING)
2628                 mydisk = mdev->new_state_tmp.disk;
2629
2630         dev_info(DEV, "drbd_sync_handshake:\n");
2631         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2632         drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2633                        mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2634
2635         hg = drbd_uuid_compare(mdev, &rule_nr);
2636
2637         dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2638
2639         if (hg == -1000) {
2640                 dev_alert(DEV, "Unrelated data, aborting!\n");
2641                 return C_MASK;
2642         }
2643         if (hg == -1001) {
2644                 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2645                 return C_MASK;
2646         }
2647
2648         if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2649             (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2650                 int f = (hg == -100) || abs(hg) == 2;
2651                 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2652                 if (f)
2653                         hg = hg*2;
2654                 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2655                      hg > 0 ? "source" : "target");
2656         }
2657
2658         if (abs(hg) == 100)
2659                 drbd_khelper(mdev, "initial-split-brain");
2660
2661         if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2662                 int pcount = (mdev->state.role == R_PRIMARY)
2663                            + (peer_role == R_PRIMARY);
2664                 int forced = (hg == -100);
2665
2666                 switch (pcount) {
2667                 case 0:
2668                         hg = drbd_asb_recover_0p(mdev);
2669                         break;
2670                 case 1:
2671                         hg = drbd_asb_recover_1p(mdev);
2672                         break;
2673                 case 2:
2674                         hg = drbd_asb_recover_2p(mdev);
2675                         break;
2676                 }
2677                 if (abs(hg) < 100) {
2678                         dev_warn(DEV, "Split-Brain detected, %d primaries, "
2679                              "automatically solved. Sync from %s node\n",
2680                              pcount, (hg < 0) ? "peer" : "this");
2681                         if (forced) {
2682                                 dev_warn(DEV, "Doing a full sync, since"
2683                                      " UUIDs where ambiguous.\n");
2684                                 hg = hg*2;
2685                         }
2686                 }
2687         }
2688
2689         if (hg == -100) {
2690                 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2691                         hg = -1;
2692                 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2693                         hg = 1;
2694
2695                 if (abs(hg) < 100)
2696                         dev_warn(DEV, "Split-Brain detected, manually solved. "
2697                              "Sync from %s node\n",
2698                              (hg < 0) ? "peer" : "this");
2699         }
2700
2701         if (hg == -100) {
2702                 /* FIXME this log message is not correct if we end up here
2703                  * after an attempted attach on a diskless node.
2704                  * We just refuse to attach -- well, we drop the "connection"
2705                  * to that disk, in a way... */
2706                 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2707                 drbd_khelper(mdev, "split-brain");
2708                 return C_MASK;
2709         }
2710
2711         if (hg > 0 && mydisk <= D_INCONSISTENT) {
2712                 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2713                 return C_MASK;
2714         }
2715
2716         if (hg < 0 && /* by intention we do not use mydisk here. */
2717             mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2718                 switch (mdev->net_conf->rr_conflict) {
2719                 case ASB_CALL_HELPER:
2720                         drbd_khelper(mdev, "pri-lost");
2721                         /* fall through */
2722                 case ASB_DISCONNECT:
2723                         dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2724                         return C_MASK;
2725                 case ASB_VIOLENTLY:
2726                         dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2727                              "assumption\n");
2728                 }
2729         }
2730
2731         if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2732                 if (hg == 0)
2733                         dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2734                 else
2735                         dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2736                                  drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2737                                  abs(hg) >= 2 ? "full" : "bit-map based");
2738                 return C_MASK;
2739         }
2740
2741         if (abs(hg) >= 2) {
2742                 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2743                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2744                         return C_MASK;
2745         }
2746
2747         if (hg > 0) { /* become sync source. */
2748                 rv = C_WF_BITMAP_S;
2749         } else if (hg < 0) { /* become sync target */
2750                 rv = C_WF_BITMAP_T;
2751         } else {
2752                 rv = C_CONNECTED;
2753                 if (drbd_bm_total_weight(mdev)) {
2754                         dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2755                              drbd_bm_total_weight(mdev));
2756                 }
2757         }
2758
2759         return rv;
2760 }
2761
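/* A rough sketch of the compatibility rules checked below: the only
 * asymmetric pairing that makes sense is discard-local on one side
 * combined with discard-remote on the other; all other policies must
 * simply be configured identically on both nodes. */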
2762 /* returns 1 if invalid */
2763 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2764 {
2765         /* ASB_DISCARD_REMOTE paired with ASB_DISCARD_LOCAL on the peer is valid */
2766         if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2767             (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2768                 return 0;
2769
2770         /* any other combination involving ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL is invalid */
2771         if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2772             self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2773                 return 1;
2774
2775         /* everything else is valid if they are equal on both sides. */
2776         if (peer == self)
2777                 return 0;
2778
2779         /* everything else is invalid. */
2780         return 1;
2781 }
2782
2783 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2784 {
2785         struct p_protocol *p = &mdev->data.rbuf.protocol;
2786         int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2787         int p_want_lose, p_two_primaries, cf;
2788         char p_integrity_alg[SHARED_SECRET_MAX] = "";
2789
2790         p_proto         = be32_to_cpu(p->protocol);
2791         p_after_sb_0p   = be32_to_cpu(p->after_sb_0p);
2792         p_after_sb_1p   = be32_to_cpu(p->after_sb_1p);
2793         p_after_sb_2p   = be32_to_cpu(p->after_sb_2p);
2794         p_two_primaries = be32_to_cpu(p->two_primaries);
2795         cf              = be32_to_cpu(p->conn_flags);
2796         p_want_lose = cf & CF_WANT_LOSE;
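        /* conn_flags is a bitmask of per-connect flags; CF_WANT_LOSE and
         * CF_DRY_RUN are the ones evaluated here. */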
2797
2798         clear_bit(CONN_DRY_RUN, &mdev->flags);
2799
2800         if (cf & CF_DRY_RUN)
2801                 set_bit(CONN_DRY_RUN, &mdev->flags);
2802
2803         if (p_proto != mdev->net_conf->wire_protocol) {
2804                 dev_err(DEV, "incompatible communication protocols\n");
2805                 goto disconnect;
2806         }
2807
2808         if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2809                 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2810                 goto disconnect;
2811         }
2812
2813         if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2814                 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2815                 goto disconnect;
2816         }
2817
2818         if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2819                 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2820                 goto disconnect;
2821         }
2822
2823         if (p_want_lose && mdev->net_conf->want_lose) {
2824                 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2825                 goto disconnect;
2826         }
2827
2828         if (p_two_primaries != mdev->net_conf->two_primaries) {
2829                 dev_err(DEV, "incompatible setting of the two-primaries option\n");
2830                 goto disconnect;
2831         }
2832
2833         if (mdev->agreed_pro_version >= 87) {
2834                 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2835
2836                 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2837                         return FALSE;
2838
2839                 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2840                 if (strcmp(p_integrity_alg, my_alg)) {
2841                         dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2842                         goto disconnect;
2843                 }
2844                 dev_info(DEV, "data-integrity-alg: %s\n",
2845                      my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2846         }
2847
2848         return TRUE;
2849
2850 disconnect:
2851         drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2852         return FALSE;
2853 }
2854
2855 /* helper function
2856  * input: alg name, feature name
2857  * return: NULL (alg name was "")
2858  *         ERR_PTR(error) if something goes wrong
2859  *         or the crypto hash ptr, if it worked out ok. */
2860 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2861                 const char *alg, const char *name)
2862 {
2863         struct crypto_hash *tfm;
2864
2865         if (!alg[0])
2866                 return NULL;
2867
2868         tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2869         if (IS_ERR(tfm)) {
2870                 dev_err(DEV, "Cannot allocate \"%s\" as %s (reason: %ld)\n",
2871                         alg, name, PTR_ERR(tfm));
2872                 return tfm;
2873         }
2874         if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2875                 crypto_free_hash(tfm);
2876                 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2877                 return ERR_PTR(-EINVAL);
2878         }
2879         return tfm;
2880 }
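/* Typical caller pattern (sketch), as used by receive_SyncParam() below:
 *
 *      tfm = drbd_crypto_alloc_digest_safe(mdev, p->verify_alg, "verify-alg");
 *      if (IS_ERR(tfm))
 *              goto disconnect;        // allocation or validation failed
 *      if (tfm)
 *              ...install it...        // NULL only means "not configured"
 */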
2881
2882 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2883 {
2884         int ok = TRUE;
2885         struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2886         unsigned int header_size, data_size, exp_max_sz;
2887         struct crypto_hash *verify_tfm = NULL;
2888         struct crypto_hash *csums_tfm = NULL;
2889         const int apv = mdev->agreed_pro_version;
2890         int *rs_plan_s = NULL;
2891         int fifo_size = 0;
2892
2893         exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
2894                     : apv == 88 ? sizeof(struct p_rs_param)
2895                                         + SHARED_SECRET_MAX
2896                     : apv <= 94 ? sizeof(struct p_rs_param_89)
2897                     : /* apv >= 95 */ sizeof(struct p_rs_param_95);
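        /* the packet grew with the protocol: apv 88 appended the verify-alg
         * name as a trailing string, apv 89 turned both algorithm names into
         * fixed-size fields (p_rs_param_89), and apv 95 added the dynamic
         * resync controller settings (p_rs_param_95). */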
2898
2899         if (packet_size > exp_max_sz) {
2900                 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2901                     packet_size, exp_max_sz);
2902                 return FALSE;
2903         }
2904
2905         if (apv <= 88) {
2906                 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2907                 data_size   = packet_size  - header_size;
2908         } else if (apv <= 94) {
2909                 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2910                 data_size   = packet_size  - header_size;
2911                 D_ASSERT(data_size == 0);
2912         } else {
2913                 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2914                 data_size   = packet_size  - header_size;
2915                 D_ASSERT(data_size == 0);
2916         }
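        /* note: only apv == 88 transfers an algorithm name as extra payload;
         * from apv 89 on the names are part of the (larger) header, hence
         * the data_size == 0 asserts above. */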
2917
2918         /* initialize verify_alg and csums_alg */
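        /* a single memset clears both, assuming verify_alg and csums_alg
         * are laid out back to back in the packet struct */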
2919         memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2920
2921         if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2922                 return FALSE;
2923
2924         mdev->sync_conf.rate      = be32_to_cpu(p->rate);
2925
2926         if (apv >= 88) {
2927                 if (apv == 88) {
2928                         if (data_size > SHARED_SECRET_MAX) {
2929                                 dev_err(DEV, "verify-alg too long, "
2930                                     "peer wants %u, accepting only %u bytes\n",
2931                                                 data_size, SHARED_SECRET_MAX);
2932                                 return FALSE;
2933                         }
2934
2935                         if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2936                                 return FALSE;
2937
2938                         /* we expect a NUL-terminated string */
2939                         /* but just in case someone tries to be evil */
2940                         D_ASSERT(p->verify_alg[data_size-1] == 0);
2941                         p->verify_alg[data_size-1] = 0;
2942
2943                 } else /* apv >= 89 */ {
2944                         /* we still expect NUL-terminated strings */
2945                         /* but just in case someone tries to be evil */
2946                         D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2947                         D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2948                         p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2949                         p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2950                 }
2951
2952                 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2953                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2954                                 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2955                                     mdev->sync_conf.verify_alg, p->verify_alg);
2956                                 goto disconnect;
2957                         }
2958                         verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2959                                         p->verify_alg, "verify-alg");
2960                         if (IS_ERR(verify_tfm)) {
2961                                 verify_tfm = NULL;
2962                                 goto disconnect;
2963                         }
2964                 }
2965
2966                 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2967                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2968                                 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2969                                     mdev->sync_conf.csums_alg, p->csums_alg);
2970                                 goto disconnect;
2971                         }
2972                         csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2973                                         p->csums_alg, "csums-alg");
2974                         if (IS_ERR(csums_tfm)) {
2975                                 csums_tfm = NULL;
2976                                 goto disconnect;
2977                         }
2978                 }
2979
2980                 if (apv > 94) {
2981                         mdev->sync_conf.rate      = be32_to_cpu(p->rate);
2982                         mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2983                         mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2984                         mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2985                         mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2986
2987                         fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
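                        /* presumably: c_plan_ahead is in 0.1 second units,
                         * SLEEP_TIME is one planner tick in jiffies, so this
                         * sizes the fifo to cover the plan-ahead window */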
2988                         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2989                                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2990                                 if (!rs_plan_s) {
2991                                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2992                                         goto disconnect;
2993                                 }
2994                         }
2995                 }
2996
2997                 /* lock against drbd_nl_syncer_conf() */
2998                 spin_lock(&mdev->peer_seq_lock);
2999                 if (verify_tfm) {
3000                         strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
3001                         mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
3002                         crypto_free_hash(mdev->verify_tfm);
3003                         mdev->verify_tfm = verify_tfm;
3004                         dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3005                 }
3006                 if (csums_tfm) {
3007                         strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
3008                         mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
3009                         crypto_free_hash(mdev->csums_tfm);
3010                         mdev->csums_tfm = csums_tfm;
3011                         dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3012                 }
3013                 if (fifo_size != mdev->rs_plan_s.size) {
3014                         kfree(mdev->rs_plan_s.values);
3015                         mdev->rs_plan_s.values = rs_plan_s;
3016                         mdev->rs_plan_s.size   = fifo_size;
3017                         mdev->rs_planed = 0;
3018                 }
3019                 spin_unlock(&mdev->peer_seq_lock);
3020         }
3021
3022         return ok;
3023 disconnect:
3024         /* just for completeness: actually not needed,
3025          * as this is not reached if csums_tfm was ok. */
3026         crypto_free_hash(csums_tfm);
3027         /* but free the verify_tfm again, if csums_tfm did not work out */
3028         crypto_free_hash(verify_tfm);
3029         drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3030         return FALSE;
3031 }
3032
3033 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
3034 {
3035         /* sorry, we currently have no working implementation
3036          * of distributed TCQ */
3037 }
3038
3039 /* warn if the arguments differ by more than 12.5% */
3040 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3041         const char *s, sector_t a, sector_t b)
3042 {
3043         sector_t d;
3044         if (a == 0 || b == 0)
3045                 return;
3046         d = (a > b) ? (a - b) : (b - a);
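        /* a>>3 == a/8, i.e. a 12.5% threshold */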
3047         if (d > (a>>3) || d > (b>>3))
3048                 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3049                      (unsigned long long)a, (unsigned long long)b);
3050 }
3051
3052 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3053 {
3054         struct p_sizes *p = &mdev->data.rbuf.sizes;
3055         enum determine_dev_size dd = unchanged;
3056         unsigned int max_seg_s;
3057         sector_t p_size, p_usize, my_usize;
3058         int ldsc = 0; /* local disk size changed */
3059         enum dds_flags ddsf;
3060
3061         p_size = be64_to_cpu(p->d_size);
3062         p_usize = be64_to_cpu(p->u_size);
3063
3064         if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
3065                 dev_err(DEV, "some backing storage is needed\n");
3066                 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3067                 return FALSE;
3068         }
3069
3070         /* just store the peer's disk size for now.
3071          * we still need to figure out whether we accept that. */
3072         mdev->p_size = p_size;
3073
3074         if (get_ldev(mdev)) {
3075                 warn_if_differ_considerably(mdev, "lower level device sizes",
3076                            p_size, drbd_get_max_capacity(mdev->ldev));
3077                 warn_if_differ_considerably(mdev, "user requested size",
3078                                             p_usize, mdev->ldev->dc.disk_size);
3079
3080                 /* if this is the first connect, or an otherwise expected
3081                  * param exchange, choose the minimum */
3082                 if (mdev->state.conn == C_WF_REPORT_PARAMS)