/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
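
/*
 * Illustrative sketch only (not used by the driver): a chain A -> B -> C is
 * represented as page_private(A) == (unsigned long)B,
 * page_private(B) == (unsigned long)C, page_private(C) == 0.
 * Walking such a chain therefore looks like
 *
 *	for (; page; page = page_chain_next(page))
 *		do_something(page);
 *
 * where do_something() stands in for whatever the caller needs, and
 * page_chain_next() simply reads page_private(page).
 */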

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list.  Since
	   they are sent in order over the wire, they have to finish
	   in order.  As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
			      bool retry)
{
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	int mxb;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
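
/*
 * Typical usage sketch (illustrative only, not a call site in this file):
 *
 *	page = drbd_alloc_pages(mdev, nr_pages, true);
 *	if (page) {
 *		... fill the chain, attach it to a bio ...
 *		drbd_free_pages(mdev, page, 0);
 *	}
 *
 * In practice the receiver pairs these calls indirectly:
 * drbd_alloc_peer_req() below allocates the chain, and
 * __drbd_free_peer_req() hands it back via drbd_free_pages().
 */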

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (data_size) {
		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
		if (!page)
			goto fail;
	}

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(tconn->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		conn_info(tconn, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

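/* Context for accepting an incoming connection: prepare_listen_socket()
 * temporarily replaces the listen socket's sk_state_change callback with
 * drbd_incoming_connection(), which completes @door_bell as soon as a peer
 * connects, so that drbd_wait_for_connect() can simply sleep on the
 * completion instead of polling accept(). */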
struct accept_wait_data {
	struct drbd_tconn *tconn;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "accept failed, err = %d\n", err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

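/* The very first packet on a freshly established socket is either
 * P_INITIAL_DATA or P_INITIAL_META; it tells the peer whether the socket
 * is meant as the data or the meta-data channel.  receive_first_packet()
 * returns the decoded command, or a negative error code. */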
static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;
	int err;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(tconn, tconn->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)
{
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	atomic_set(&mdev->ap_in_flight, 0);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

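/*
 * Connection establishment: both peers keep alternating between actively
 * connecting (drbd_try_connect) and accepting (drbd_wait_for_connect) until
 * one data socket and one meta-data socket exist.  The first packet on each
 * socket (P_INITIAL_DATA/P_INITIAL_META) tells the peer which channel it is;
 * if both sides happened to connect at the same time, one of the crossed
 * socket pairs is dropped and a coin flip (prandom_u32() & 1) decides
 * whether to retry immediately.
 */
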
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
{
	struct drbd_socket sock, msock;
	struct drbd_conf *mdev;
	struct net_conf *nc;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.tconn = tconn,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &tconn->flags);
	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = tconn->data.sbuf;
	sock.rbuf = tconn->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = tconn->meta.sbuf;
	msock.rbuf = tconn->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	tconn->agreed_pro_version = 80;

	if (prepare_listen_socket(tconn, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(tconn);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(tconn, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
				msock.socket = s;
				send_first_packet(tconn, &msock, P_INITIAL_META);
			} else {
				conn_err(tconn, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock.socket && msock.socket) {
			rcu_read_lock();
			nc = rcu_dereference(tconn->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn, &ad);
		if (s) {
			int fp = receive_first_packet(tconn, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &tconn->flags);
				if (msock.socket) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	tconn->data.socket = sock.socket;
	tconn->meta.socket = msock.socket;
	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	tconn->data.socket->sk->sk_sndtimeo = timeout;
	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	set_bit(STATE_SENT, &tconn->flags);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();

		/* Prevent a race between resync-handshake and
		 * being promoted to Primary.
		 *
		 * Grab and release the state mutex, so we know that any current
		 * drbd_set_role() is finished, and any incoming drbd_set_role
		 * will see the STATE_SENT flag, and wait for it to be cleared.
		 */
		mutex_lock(mdev->state_mutex);
		mutex_unlock(mdev->state_mutex);

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
		else
			clear_bit(DISCARD_MY_DATA, &mdev->flags);

		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &tconn->flags);
		return 0;
	}

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

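/* Parse an on-the-wire packet header into @pi.  Protocol 100 peers send
 * struct p_header100 (magic DRBD_MAGIC_100, includes a volume number);
 * older peers send p_header95 (DRBD_MAGIC_BIG) or p_header80 (DRBD_MAGIC),
 * neither of which carries a volume number, so pi->vnr defaults to 0. */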
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			conn_err(tconn, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	void *buffer = tconn->data.rbuf;
	int err;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
	if (err)
		return err;

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

	return err;
}

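/* Flush the backing device of every volume on this connection, if the
 * currently negotiated write ordering requires flushes.  A failing flush
 * downgrades the connection's write ordering to WO_drain_io; see the
 * comment in the error path below. */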
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001144static void drbd_flush(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001145{
1146 int rv;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001147 struct drbd_conf *mdev;
1148 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001149
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001150 if (tconn->write_ordering >= WO_bdev_flush) {
Lars Ellenberg615e0872011-11-17 14:32:12 +01001151 rcu_read_lock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001152 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Lars Ellenberg615e0872011-11-17 14:32:12 +01001153 if (!get_ldev(mdev))
1154 continue;
1155 kref_get(&mdev->kref);
1156 rcu_read_unlock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001157
Lars Ellenberg615e0872011-11-17 14:32:12 +01001158 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1159 GFP_NOIO, NULL);
1160 if (rv) {
1161 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1162 /* would rather check on EOPNOTSUPP, but that is not reliable.
1163 * don't try again for ANY return value != 0
1164 * if (rv == -EOPNOTSUPP) */
1165 drbd_bump_write_ordering(tconn, WO_drain_io);
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001166 }
Lars Ellenberg615e0872011-11-17 14:32:12 +01001167 put_ldev(mdev);
1168 kref_put(&mdev->kref, &drbd_minor_destroy);
1169
1170 rcu_read_lock();
1171 if (rv)
1172 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001173 }
Lars Ellenberg615e0872011-11-17 14:32:12 +01001174 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001175 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001176}
1177
1178/**
1179 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
1180 * @tconn: DRBD connection.
1181 * @epoch: Epoch object.
1182 * @ev: Epoch event.
1183 */
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001184static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001185 struct drbd_epoch *epoch,
1186 enum epoch_event ev)
1187{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001188 int epoch_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001189 struct drbd_epoch *next_epoch;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001190 enum finish_epoch rv = FE_STILL_LIVE;
1191
Philipp Reisner12038a32011-11-09 19:18:00 +01001192 spin_lock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001193 do {
1194 next_epoch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001195
1196 epoch_size = atomic_read(&epoch->epoch_size);
1197
1198 switch (ev & ~EV_CLEANUP) {
1199 case EV_PUT:
1200 atomic_dec(&epoch->active);
1201 break;
1202 case EV_GOT_BARRIER_NR:
1203 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001204 break;
1205 case EV_BECAME_LAST:
1206 /* nothing to do */
1207 break;
1208 }
1209
Philipp Reisnerb411b362009-09-25 16:07:19 -07001210 if (epoch_size != 0 &&
1211 atomic_read(&epoch->active) == 0 &&
Philipp Reisner80f9fd52011-07-18 15:45:15 +02001212 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001213 if (!(ev & EV_CLEANUP)) {
Philipp Reisner12038a32011-11-09 19:18:00 +01001214 spin_unlock(&tconn->epoch_lock);
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001215 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
Philipp Reisner12038a32011-11-09 19:18:00 +01001216 spin_lock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001217 }
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001218#if 0
1219 /* FIXME: dec unacked on connection, once we have
1220 * something to count pending connection packets in. */
Philipp Reisner80f9fd52011-07-18 15:45:15 +02001221 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001222 dec_unacked(epoch->tconn);
1223#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07001224
Philipp Reisner12038a32011-11-09 19:18:00 +01001225 if (tconn->current_epoch != epoch) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001226 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1227 list_del(&epoch->list);
1228 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
Philipp Reisner12038a32011-11-09 19:18:00 +01001229 tconn->epochs--;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001230 kfree(epoch);
1231
1232 if (rv == FE_STILL_LIVE)
1233 rv = FE_DESTROYED;
1234 } else {
1235 epoch->flags = 0;
1236 atomic_set(&epoch->epoch_size, 0);
Uwe Kleine-König698f9312010-07-02 20:41:51 +02001237 /* atomic_set(&epoch->active, 0); is already zero */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001238 if (rv == FE_STILL_LIVE)
1239 rv = FE_RECYCLED;
1240 }
1241 }
1242
1243 if (!next_epoch)
1244 break;
1245
1246 epoch = next_epoch;
1247 } while (1);
1248
Philipp Reisner12038a32011-11-09 19:18:00 +01001249 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001250
Philipp Reisnerb411b362009-09-25 16:07:19 -07001251 return rv;
1252}
1253
1254/**
1255 * drbd_bump_write_ordering() - Fall back to another write ordering method
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001256 * @tconn: DRBD connection.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001257 * @wo: Write ordering method to try.
1258 */
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001259void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001260{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001261 struct disk_conf *dc;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001262 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001263 enum write_ordering_e pwo;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001264 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001265 static char *write_ordering_str[] = {
1266 [WO_none] = "none",
1267 [WO_drain_io] = "drain",
1268 [WO_bdev_flush] = "flush",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269 };
1270
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001271 pwo = tconn->write_ordering;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001272 wo = min(pwo, wo);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001273 rcu_read_lock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001274 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001275 if (!get_ldev_if_state(mdev, D_ATTACHING))
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001276 continue;
1277 dc = rcu_dereference(mdev->ldev->disk_conf);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001278
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001279 if (wo == WO_bdev_flush && !dc->disk_flushes)
1280 wo = WO_drain_io;
1281 if (wo == WO_drain_io && !dc->disk_drain)
1282 wo = WO_none;
1283 put_ldev(mdev);
1284 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001285 rcu_read_unlock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001286 tconn->write_ordering = wo;
1287 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1288 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001289}
1290
1291/**
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001292 * drbd_submit_peer_request() - submit the pages of a peer request via one or more bios
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001293 * @mdev: DRBD device.
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001294 * @peer_req: peer request
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001295 * @rw: flag field, see bio->bi_rw
 * @fault_type: DRBD fault injection type passed on to drbd_generic_make_request()
Lars Ellenberg10f6d992011-01-24 14:47:09 +01001296 *
1297 * May spread the pages to multiple bios,
1298 * depending on bio_add_page restrictions.
1299 *
1300 * Returns 0 if all bios have been submitted,
1301 * -ENOMEM if we could not allocate enough bios,
1302 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1303 * single page to an empty bio (which should never happen and likely indicates
1304 * that the lower level IO stack is in some way broken). This has been observed
1305 * on certain Xen deployments.
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001306 */
1307/* TODO allocate from our own bio_set. */
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001308int drbd_submit_peer_request(struct drbd_conf *mdev,
1309 struct drbd_peer_request *peer_req,
1310 const unsigned rw, const int fault_type)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001311{
1312 struct bio *bios = NULL;
1313 struct bio *bio;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001314 struct page *page = peer_req->pages;
1315 sector_t sector = peer_req->i.sector;
1316 unsigned ds = peer_req->i.size;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001317 unsigned n_bios = 0;
1318 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
Lars Ellenberg10f6d992011-01-24 14:47:09 +01001319 int err = -ENOMEM;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001320
1321 /* In most cases, we will only need one bio. But in case the lower
1322 * level restrictions happen to be different at this offset on this
1323 * side than those of the sending peer, we may need to submit the
Lars Ellenberg9476f392011-02-23 17:02:01 +01001324 * request in more than one bio.
1325 *
1326 * Plain bio_alloc is good enough here, this is no DRBD internally
1327 * generated bio, but a bio allocated on behalf of the peer.
1328 */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001329next_bio:
1330 bio = bio_alloc(GFP_NOIO, nr_pages);
1331 if (!bio) {
1332 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1333 goto fail;
1334 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001335 /* > peer_req->i.sector, unless this is the first bio */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001336 bio->bi_sector = sector;
1337 bio->bi_bdev = mdev->ldev->backing_bdev;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001338 bio->bi_rw = rw;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001339 bio->bi_private = peer_req;
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001340 bio->bi_end_io = drbd_peer_request_endio;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001341
1342 bio->bi_next = bios;
1343 bios = bio;
1344 ++n_bios;
1345
1346 page_chain_for_each(page) {
1347 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1348 if (!bio_add_page(bio, page, len, 0)) {
Lars Ellenberg10f6d992011-01-24 14:47:09 +01001349 /* A single page must always be possible!
1350 * But in case it fails anyways,
1351 * we deal with it, and complain (below). */
1352 if (bio->bi_vcnt == 0) {
1353 dev_err(DEV,
1354 "bio_add_page failed for len=%u, "
1355 "bi_vcnt=0 (bi_sector=%llu)\n",
1356 len, (unsigned long long)bio->bi_sector);
1357 err = -ENOSPC;
1358 goto fail;
1359 }
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001360 goto next_bio;
1361 }
1362 ds -= len;
1363 sector += len >> 9;
1364 --nr_pages;
1365 }
1366 D_ASSERT(page == NULL);
1367 D_ASSERT(ds == 0);
1368
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001369 atomic_set(&peer_req->pending_bios, n_bios);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001370 do {
1371 bio = bios;
1372 bios = bios->bi_next;
1373 bio->bi_next = NULL;
1374
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001375 drbd_generic_make_request(mdev, fault_type, bio);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001376 } while (bios);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001377 return 0;
1378
1379fail:
1380 while (bios) {
1381 bio = bios;
1382 bios = bios->bi_next;
1383 bio_put(bio);
1384 }
Lars Ellenberg10f6d992011-01-24 14:47:09 +01001385 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001386}
1387
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001388static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001389 struct drbd_peer_request *peer_req)
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001390{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001391 struct drbd_interval *i = &peer_req->i;
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001392
1393 drbd_remove_interval(&mdev->write_requests, i);
1394 drbd_clear_interval(i);
1395
Andreas Gruenbacher6c852be2011-02-04 15:38:52 +01001396 /* Wake up any processes waiting for this peer request to complete. */
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001397 if (i->waiting)
1398 wake_up(&mdev->misc_wait);
1399}
1400
Philipp Reisner77fede52011-11-10 21:19:11 +01001401void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1402{
1403 struct drbd_conf *mdev;
1404 int vnr;
1405
1406 rcu_read_lock();
1407 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1408 kref_get(&mdev->kref);
1409 rcu_read_unlock();
1410 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1411 kref_put(&mdev->kref, &drbd_minor_destroy);
1412 rcu_read_lock();
1413 }
1414 rcu_read_unlock();
1415}
1416
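/* A P_BARRIER packet from the peer closes its current write epoch.
 * Depending on the write ordering in effect we either recycle an empty
 * epoch, or wait for the active writes to drain and flush the backing
 * devices before starting a new epoch, so that the corresponding
 * P_BARRIER_ACK is only sent once the epoch's data is stable. */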
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001417static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001418{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001419 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001420 struct p_barrier *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001421 struct drbd_epoch *epoch;
1422
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001423 /* FIXME these are unacked on connection,
1424 * not a specific (peer)device.
1425 */
Philipp Reisner12038a32011-11-09 19:18:00 +01001426 tconn->current_epoch->barrier_nr = p->barrier;
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001427 tconn->current_epoch->tconn = tconn;
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001428 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001429
1430 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1431 * the activity log, which means it would not be resynced in case the
1432 * R_PRIMARY crashes now.
1433 * Therefore we must send the barrier_ack after the barrier request was
1434 * completed. */
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001435 switch (tconn->write_ordering) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001436 case WO_none:
1437 if (rv == FE_RECYCLED)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001438 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001439
1440 /* receiver context, in the writeout path of the other node.
1441 * avoid potential distributed deadlock */
1442 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1443 if (epoch)
1444 break;
1445 else
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001446 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
Philipp Reisner2451fc32010-08-24 13:43:11 +02001447 /* Fall through */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448
1449 case WO_bdev_flush:
1450 case WO_drain_io:
Philipp Reisner77fede52011-11-10 21:19:11 +01001451 conn_wait_active_ee_empty(tconn);
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001452 drbd_flush(tconn);
Philipp Reisner2451fc32010-08-24 13:43:11 +02001453
Philipp Reisner12038a32011-11-09 19:18:00 +01001454 if (atomic_read(&tconn->current_epoch->epoch_size)) {
Philipp Reisner2451fc32010-08-24 13:43:11 +02001455 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1456 if (epoch)
1457 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001458 }
1459
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001460 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001461 default:
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001462 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001463 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001464 }
1465
1466 epoch->flags = 0;
1467 atomic_set(&epoch->epoch_size, 0);
1468 atomic_set(&epoch->active, 0);
1469
Philipp Reisner12038a32011-11-09 19:18:00 +01001470 spin_lock(&tconn->epoch_lock);
1471 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1472 list_add(&epoch->list, &tconn->current_epoch->list);
1473 tconn->current_epoch = epoch;
1474 tconn->epochs++;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001475 } else {
1476 /* The current_epoch got recycled while we allocated this one... */
1477 kfree(epoch);
1478 }
Philipp Reisner12038a32011-11-09 19:18:00 +01001479 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001480
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001481 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001482}
1483
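/* Receive the payload of a data packet into a freshly allocated peer
 * request.  If a peer data integrity transform is configured, the digest
 * is received first and verified against the received pages; a short read,
 * failed sanity check or digest mismatch makes this return NULL. */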
1484/* used from receive_RSDataReply (recv_resync_read)
1485 * and from receive_Data */
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01001486static struct drbd_peer_request *
1487read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1488 int data_size) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001489{
Lars Ellenberg66660322010-04-06 12:15:04 +02001490 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001491 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001492 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001493 int dgs, ds, err;
Philipp Reisnera0638452011-01-19 14:31:32 +01001494 void *dig_in = mdev->tconn->int_dig_in;
1495 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001496 unsigned long *data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001498 dgs = 0;
1499 if (mdev->tconn->peer_integrity_tfm) {
1500 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001501 /*
1502 * FIXME: Receive the incoming digest into the receive buffer
1503 * here, together with its struct p_data?
1504 */
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001505 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1506 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001507 return NULL;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001508 data_size -= dgs;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001509 }
1510
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001511 if (!expect(IS_ALIGNED(data_size, 512)))
1512 return NULL;
1513 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1514 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515
Lars Ellenberg66660322010-04-06 12:15:04 +02001516 /* even though we trust our peer,
1517 * we sometimes have to double check. */
1518 if (sector + (data_size>>9) > capacity) {
Lars Ellenbergfdda6542011-01-24 15:11:01 +01001519 dev_err(DEV, "request from peer beyond end of local disk: "
1520 "capacity: %llus < sector: %llus + size: %u\n",
Lars Ellenberg66660322010-04-06 12:15:04 +02001521 (unsigned long long)capacity,
1522 (unsigned long long)sector, data_size);
1523 return NULL;
1524 }
1525
Philipp Reisnerb411b362009-09-25 16:07:19 -07001526 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1527 * "criss-cross" setup, that might cause write-out on some other DRBD,
1528 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02001529 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001530 if (!peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001531 return NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001532
Lars Ellenberga73ff322012-06-25 19:15:38 +02001533 if (!data_size)
Lars Ellenberg81a35372012-07-30 09:00:54 +02001534 return peer_req;
Lars Ellenberga73ff322012-06-25 19:15:38 +02001535
Philipp Reisnerb411b362009-09-25 16:07:19 -07001536 ds = data_size;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001537 page = peer_req->pages;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001538 page_chain_for_each(page) {
1539 unsigned len = min_t(int, ds, PAGE_SIZE);
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001540 data = kmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001541 err = drbd_recv_all_warn(mdev->tconn, data, len);
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +01001542 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001543 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1544 data[0] = data[0] ^ (unsigned long)-1;
1545 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001546 kunmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001547 if (err) {
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001548 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001549 return NULL;
1550 }
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001551 ds -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001552 }
1553
1554 if (dgs) {
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02001555 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001556 if (memcmp(dig_in, dig_vv, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001557 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1558 (unsigned long long)sector, data_size);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001559 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001560 return NULL;
1561 }
1562 }
1563 mdev->recv_cnt += data_size>>9;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001564 return peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001565}
1566
1567/* drbd_drain_block() just takes a data block
1568 * out of the socket input buffer, and discards it.
1569 */
1570static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1571{
1572 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001573 int err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001574 void *data;
1575
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001576 if (!data_size)
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001577 return 0;
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001578
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +02001579 page = drbd_alloc_pages(mdev, 1, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001580
1581 data = kmap(page);
1582 while (data_size) {
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001583 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1584
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001585 err = drbd_recv_all_warn(mdev->tconn, data, len);
1586 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001587 break;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001588 data_size -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589 }
1590 kunmap(page);
Andreas Gruenbacher5cc287e2011-04-07 21:02:59 +02001591 drbd_free_pages(mdev, page, 0);
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001592 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593}
1594
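/* Receive reply data for a read request that was served by the peer
 * directly into the pages of the request's master bio, verifying the data
 * integrity digest if one is configured. */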
1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 sector_t sector, int data_size)
1597{
1598 struct bio_vec *bvec;
1599 struct bio *bio;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001600 int dgs, err, i, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001601 void *dig_in = mdev->tconn->int_dig_in;
1602 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001603
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001604 dgs = 0;
1605 if (mdev->tconn->peer_integrity_tfm) {
1606 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001607 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1608 if (err)
1609 return err;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001610 data_size -= dgs;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001611 }
1612
Philipp Reisnerb411b362009-09-25 16:07:19 -07001613 /* optimistically update recv_cnt. if receiving fails below,
1614 * we disconnect anyways, and counters will be reset. */
1615 mdev->recv_cnt += data_size>>9;
1616
1617 bio = req->master_bio;
1618 D_ASSERT(sector == bio->bi_sector);
1619
1620 bio_for_each_segment(bvec, bio, i) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001621 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001622 expect = min_t(int, data_size, bvec->bv_len);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001623 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624 kunmap(bvec->bv_page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001625 if (err)
1626 return err;
1627 data_size -= expect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001628 }
1629
1630 if (dgs) {
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02001631 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001632 if (memcmp(dig_in, dig_vv, dgs)) {
1633 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001634 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001635 }
1636 }
1637
1638 D_ASSERT(data_size == 0);
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001639 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001640}
1641
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001642/*
1643 * e_end_resync_block() is called in asender context via
1644 * drbd_finish_peer_reqs().
1645 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001646static int e_end_resync_block(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001647{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001648 struct drbd_peer_request *peer_req =
1649 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001650 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001651 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001652 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001653
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001654 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001655
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001656 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1657 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001658 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001659 } else {
1660 /* Record failure to sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001661 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001662
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001663 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001664 }
1665 dec_unacked(mdev);
1666
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001667 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001668}
1669
1670static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1671{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001672 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001673
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001674 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1675 if (!peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001676 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001677
1678 dec_rs_pending(mdev);
1679
Philipp Reisnerb411b362009-09-25 16:07:19 -07001680 inc_unacked(mdev);
1681 /* corresponding dec_unacked() in e_end_resync_block()
1682 * or in _drbd_clear_done_ee, respectively */
1683
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001684 peer_req->w.cb = e_end_resync_block;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001685
Philipp Reisner87eeee42011-01-19 14:16:30 +01001686 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001687 list_add(&peer_req->w.list, &mdev->sync_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001688 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001689
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001690 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001691 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001692 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001693
Lars Ellenberg10f6d992011-01-24 14:47:09 +01001694 /* don't care for the reason here */
1695 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001696 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001697 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001698 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001699
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001700 drbd_free_peer_req(mdev, peer_req);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001701fail:
1702 put_ldev(mdev);
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001703 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001704}
1705
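/* The block_id we put into our packets is the address of our own
 * struct drbd_request; the peer simply echoes it back.  Before trusting
 * the pointer, verify via the interval tree that it really corresponds to
 * a request we still have pending for this sector. */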
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001706static struct drbd_request *
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001707find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1708 sector_t sector, bool missing_ok, const char *func)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001709{
1710 struct drbd_request *req;
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001711
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001712 /* Request object according to our peer */
1713 req = (struct drbd_request *)(unsigned long)id;
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001714 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001715 return req;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001716 if (!missing_ok) {
Andreas Gruenbacher5af172e2011-07-15 09:43:23 +02001717 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001718 (unsigned long)id, (unsigned long long)sector);
1719 }
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001720 return NULL;
1721}
1722
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001723static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001724{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001725 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001726 struct drbd_request *req;
1727 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001728 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001729 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001730
1731 mdev = vnr_to_mdev(tconn, pi->vnr);
1732 if (!mdev)
1733 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001734
1735 sector = be64_to_cpu(p->sector);
1736
Philipp Reisner87eeee42011-01-19 14:16:30 +01001737 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001738 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001739 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001740 if (unlikely(!req))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001741 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001742
Bart Van Assche24c48302011-05-21 18:32:29 +02001743 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001744 * special casing it there for the various failure cases.
1745 * still no race with drbd_fail_pending_reads */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001746 err = recv_dless_read(mdev, req, sector, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001747 if (!err)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001748 req_mod(req, DATA_RECEIVED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001749 /* else: nothing. handled from drbd_disconnect...
1750 * I don't think we may complete this just yet
1751 * in case we are "on-disconnect: freeze" */
1752
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001753 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001754}
1755
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001756static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001757{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001758 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001759 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001760 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001761 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001762
1763 mdev = vnr_to_mdev(tconn, pi->vnr);
1764 if (!mdev)
1765 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001766
1767 sector = be64_to_cpu(p->sector);
1768 D_ASSERT(p->block_id == ID_SYNCER);
1769
1770 if (get_ldev(mdev)) {
1771 /* data is submitted to disk within recv_resync_read.
1772 * corresponding put_ldev done below on error,
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001773 * or in drbd_peer_request_endio. */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001774 err = recv_resync_read(mdev, sector, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001775 } else {
1776 if (__ratelimit(&drbd_ratelimit_state))
1777 dev_err(DEV, "Can not write resync data to local disk.\n");
1778
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001779 err = drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001780
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001781 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001782 }
1783
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001784 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
Philipp Reisner778f2712010-07-06 11:14:00 +02001785
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001786 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001787}
1788
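/* Called with req_lock held after a conflicting peer write has completed:
 * requeue all postponed local writes (RQ_POSTPONED, no local I/O pending)
 * that overlap the given range, so they can be retried. */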
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001789static void restart_conflicting_writes(struct drbd_conf *mdev,
1790 sector_t sector, int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001791{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001792 struct drbd_interval *i;
1793 struct drbd_request *req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001794
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001795 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1796 if (!i->local)
1797 continue;
1798 req = container_of(i, struct drbd_request, i);
1799 if (req->rq_state & RQ_LOCAL_PENDING ||
1800 !(req->rq_state & RQ_POSTPONED))
1801 continue;
Lars Ellenberg2312f0b2011-11-24 10:36:25 +01001802 /* as it is RQ_POSTPONED, this will cause it to
1803 * be queued on the retry workqueue. */
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001804 __req_mod(req, CONFLICT_RESOLVED, NULL);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001805 }
1806}
1807
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001808/*
1809 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
Philipp Reisnerb411b362009-09-25 16:07:19 -07001810 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001811static int e_end_block(struct drbd_work *w, int cancel)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001812{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001813 struct drbd_peer_request *peer_req =
1814 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001815 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001816 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001817 int err = 0, pcmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001818
Philipp Reisner303d1442011-04-13 16:24:47 -07001819 if (peer_req->flags & EE_SEND_WRITE_ACK) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001820 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001821 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1822 mdev->state.conn <= C_PAUSED_SYNC_T &&
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001823 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
Philipp Reisnerb411b362009-09-25 16:07:19 -07001824 P_RS_WRITE_ACK : P_WRITE_ACK;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001825 err = drbd_send_ack(mdev, pcmd, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001826 if (pcmd == P_RS_WRITE_ACK)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001827 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001828 } else {
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001829 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001830 /* we expect it to be marked out of sync anyways...
1831 * maybe assert this? */
1832 }
1833 dec_unacked(mdev);
1834 }
1835 /* we delete from the conflict detection hash _after_ we sent out the
1836 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
Philipp Reisner302bdea2011-04-21 11:36:49 +02001837 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001838 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001839 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1840 drbd_remove_epoch_entry_interval(mdev, peer_req);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001841 if (peer_req->flags & EE_RESTART_REQUESTS)
1842 restart_conflicting_writes(mdev, sector, peer_req->i.size);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001843 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbb3bfe92011-01-21 15:59:23 +01001844 } else
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001845 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001846
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001847 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001848
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001849 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001850}
1851
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001852static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001853{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001854 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001855 struct drbd_peer_request *peer_req =
1856 container_of(w, struct drbd_peer_request, w);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001857 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001858
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001859 err = drbd_send_ack(mdev, ack, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001860 dec_unacked(mdev);
1861
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001862 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001863}
1864
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001865static int e_send_superseded(struct drbd_work *w, int unused)
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001866{
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001867 return e_send_ack(w, P_SUPERSEDED);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001868}
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001869
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001870static int e_send_retry_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001871{
1872 struct drbd_tconn *tconn = w->mdev->tconn;
1873
1874 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001875 P_RETRY_WRITE : P_SUPERSEDED);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001876}
1877
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001878static bool seq_greater(u32 a, u32 b)
1879{
1880 /*
1881 * We assume 32-bit wrap-around here.
1882 * For 24-bit wrap-around, we would have to shift:
1883 * a <<= 8; b <<= 8;
1884 */
1885 return (s32)a - (s32)b > 0;
1886}
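/* Example of the wrap-around semantics above: seq_greater(1, 0xffffffff)
 * is true, because (s32)1 - (s32)0xffffffff == 2 > 0; a sequence number
 * that has just wrapped still compares as newer. */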
1887
1888static u32 seq_max(u32 a, u32 b)
1889{
1890 return seq_greater(a, b) ? a : b;
1891}
1892
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001893static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001894{
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001895 unsigned int newest_peer_seq;
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001896
Philipp Reisnerb874d232013-10-23 10:59:16 +02001897 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001898 spin_lock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001899 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1900 mdev->peer_seq = newest_peer_seq;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001901 spin_unlock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001902 /* wake up only if we actually changed mdev->peer_seq */
1903 if (peer_seq == newest_peer_seq)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001904 wake_up(&mdev->seq_wait);
1905 }
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001906}
1907
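/* s1/s2 are sector numbers, l1/l2 are lengths in bytes (hence the >>9);
 * true if the two ranges intersect. */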
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001908static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1909{
1910 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1911}
1912
1913/* maybe change sync_ee into interval trees as well? */
Philipp Reisner3ea35df2012-04-06 12:13:18 +02001914static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001915{
1916 struct drbd_peer_request *rs_req;
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001917 bool rv = 0;
1918
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001919 spin_lock_irq(&mdev->tconn->req_lock);
1920 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1921 if (overlaps(peer_req->i.sector, peer_req->i.size,
1922 rs_req->i.sector, rs_req->i.size)) {
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001923 rv = 1;
1924 break;
1925 }
1926 }
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001927 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001928
1929 return rv;
1930}
1931
Philipp Reisnerb411b362009-09-25 16:07:19 -07001932/* Called from receive_Data.
1933 * Synchronize packets on sock with packets on msock.
1934 *
1935 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1936 * packet traveling on msock, they are still processed in the order they have
1937 * been sent.
1938 *
1939 * Note: we don't care for Ack packets overtaking P_DATA packets.
1940 *
1941 * In case peer_seq is larger than mdev->peer_seq, there are
1942 * outstanding packets on the msock. We wait for them to arrive.
1943 * In case we are the logically next packet, we update mdev->peer_seq
1944 * ourselves. Correctly handles 32bit wrap around.
1945 *
1946 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1947 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1948 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1949 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1950 *
1951 * returns 0 if we may process the packet,
1952 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001953static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001954{
1955 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001956 long timeout;
Philipp Reisnerb874d232013-10-23 10:59:16 +02001957 int ret = 0, tp;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001958
Philipp Reisnerb874d232013-10-23 10:59:16 +02001959 if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001960 return 0;
1961
Philipp Reisnerb411b362009-09-25 16:07:19 -07001962 spin_lock(&mdev->peer_seq_lock);
1963 for (;;) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001964 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1965 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001966 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001967 }
Philipp Reisnerb874d232013-10-23 10:59:16 +02001968
Philipp Reisnerb411b362009-09-25 16:07:19 -07001969 if (signal_pending(current)) {
1970 ret = -ERESTARTSYS;
1971 break;
1972 }
Philipp Reisnerb874d232013-10-23 10:59:16 +02001973
1974 rcu_read_lock();
1975 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1976 rcu_read_unlock();
1977
1978 if (!tp)
1979 break;
1980
1981 /* Only need to wait if two_primaries is enabled */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001982 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001983 spin_unlock(&mdev->peer_seq_lock);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001984 rcu_read_lock();
1985 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1986 rcu_read_unlock();
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001987 timeout = schedule_timeout(timeout);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001988 spin_lock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001989 if (!timeout) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001990 ret = -ETIMEDOUT;
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001991 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001992 break;
1993 }
1994 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001995 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001996 finish_wait(&mdev->seq_wait, &wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001997 return ret;
1998}
1999
Lars Ellenberg688593c2010-11-17 22:25:03 +01002000/* see also bio_flags_to_wire()
2001 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2002 * flags and back. We may replicate to other kernel versions. */
2003static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002004{
Lars Ellenberg688593c2010-11-17 22:25:03 +01002005 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2006 (dpf & DP_FUA ? REQ_FUA : 0) |
2007 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2008 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002009}
2010
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002011static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2012 unsigned int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002013{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002014 struct drbd_interval *i;
2015
2016 repeat:
2017 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2018 struct drbd_request *req;
2019 struct bio_and_error m;
2020
2021 if (!i->local)
2022 continue;
2023 req = container_of(i, struct drbd_request, i);
2024 if (!(req->rq_state & RQ_POSTPONED))
2025 continue;
2026 req->rq_state &= ~RQ_POSTPONED;
2027 __req_mod(req, NEG_ACKED, &m);
2028 spin_unlock_irq(&mdev->tconn->req_lock);
2029 if (m.bio)
2030 complete_master_bio(mdev, &m);
2031 spin_lock_irq(&mdev->tconn->req_lock);
2032 goto repeat;
2033 }
2034}
2035
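/* Called with req_lock held: check the new peer write against the
 * write_requests interval tree.  Overlaps with remote requests simply wait
 * for the earlier request to complete.  Overlaps with local requests are
 * either answered directly with P_SUPERSEDED / P_RETRY_WRITE (when this
 * node resolves conflicts) or make us wait for / restart the local request.
 * Returns 0 if the peer request may be submitted, -ENOENT if it was
 * answered without being submitted, or another error. */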
2036static int handle_write_conflicts(struct drbd_conf *mdev,
2037 struct drbd_peer_request *peer_req)
2038{
2039 struct drbd_tconn *tconn = mdev->tconn;
Lars Ellenberg427c0432012-08-01 12:43:01 +02002040 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002041 sector_t sector = peer_req->i.sector;
2042 const unsigned int size = peer_req->i.size;
2043 struct drbd_interval *i;
2044 bool equal;
2045 int err;
2046
2047 /*
2048 * Inserting the peer request into the write_requests tree will prevent
2049 * new conflicting local requests from being added.
2050 */
2051 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2052
2053 repeat:
2054 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2055 if (i == &peer_req->i)
2056 continue;
2057
2058 if (!i->local) {
2059 /*
2060 * Our peer has sent a conflicting remote request; this
2061 * should not happen in a two-node setup. Wait for the
2062 * earlier peer request to complete.
2063 */
2064 err = drbd_wait_misc(mdev, i);
2065 if (err)
2066 goto out;
2067 goto repeat;
2068 }
2069
2070 equal = i->sector == sector && i->size == size;
2071 if (resolve_conflicts) {
2072 /*
2073 * If the peer request is fully contained within the
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002074 * overlapping request, it can be considered overwritten
2075 * and thus superseded; otherwise, it will be retried
2076 * once all overlapping requests have completed.
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002077 */
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002078 bool superseded = i->sector <= sector && i->sector +
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002079 (i->size >> 9) >= sector + (size >> 9);
2080
2081 if (!equal)
2082 dev_alert(DEV, "Concurrent writes detected: "
2083 "local=%llus +%u, remote=%llus +%u, "
2084 "assuming %s came first\n",
2085 (unsigned long long)i->sector, i->size,
2086 (unsigned long long)sector, size,
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002087 superseded ? "local" : "remote");
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002088
2089 inc_unacked(mdev);
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002090 peer_req->w.cb = superseded ? e_send_superseded :
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002091 e_send_retry_write;
2092 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2093 wake_asender(mdev->tconn);
2094
2095 err = -ENOENT;
2096 goto out;
2097 } else {
2098 struct drbd_request *req =
2099 container_of(i, struct drbd_request, i);
2100
2101 if (!equal)
2102 dev_alert(DEV, "Concurrent writes detected: "
2103 "local=%llus +%u, remote=%llus +%u\n",
2104 (unsigned long long)i->sector, i->size,
2105 (unsigned long long)sector, size);
2106
2107 if (req->rq_state & RQ_LOCAL_PENDING ||
2108 !(req->rq_state & RQ_POSTPONED)) {
2109 /*
2110 * Wait for the node with the discard flag to
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002111 * decide if this request has been superseded
2112 * or needs to be retried.
2113 * Requests that have been superseded will
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002114 * disappear from the write_requests tree.
2115 *
2116 * In addition, wait for the conflicting
2117 * request to finish locally before submitting
2118 * the conflicting peer request.
2119 */
2120 err = drbd_wait_misc(mdev, &req->i);
2121 if (err) {
2122 _conn_request_state(mdev->tconn,
2123 NS(conn, C_TIMEOUT),
2124 CS_HARD);
2125 fail_postponed_requests(mdev, sector, size);
2126 goto out;
2127 }
2128 goto repeat;
2129 }
2130 /*
2131 * Remember to restart the conflicting requests after
2132 * the new peer request has completed.
2133 */
2134 peer_req->flags |= EE_RESTART_REQUESTS;
2135 }
2136 }
2137 err = 0;
2138
2139 out:
2140 if (err)
2141 drbd_remove_epoch_entry_interval(mdev, peer_req);
2142 return err;
2143}
2144
Philipp Reisnerb411b362009-09-25 16:07:19 -07002145/* mirrored write */
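/* receive_Data(): read the write payload into a peer request, assign it to
 * the current epoch, order it against concurrent writes when two primaries
 * are in use (peer_seq wait plus the write_requests interval tree), and
 * submit it to the local disk.  With agreed protocol 100 and newer the ack
 * policy comes in dp_flags; with older peers it is derived here from the
 * configured wire protocol (P_WRITE_ACK after local completion for
 * protocol C, immediate P_RECV_ACK for protocol B). */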
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002146static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002147{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002148 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002149 sector_t sector;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002150 struct drbd_peer_request *peer_req;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002151 struct p_data *p = pi->data;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002152 u32 peer_seq = be32_to_cpu(p->seq_num);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002153 int rw = WRITE;
2154 u32 dp_flags;
Philipp Reisner302bdea2011-04-21 11:36:49 +02002155 int err, tp;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002156
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002157 mdev = vnr_to_mdev(tconn, pi->vnr);
2158 if (!mdev)
2159 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002160
Philipp Reisnerb411b362009-09-25 16:07:19 -07002161 if (!get_ldev(mdev)) {
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002162 int err2;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002163
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002164 err = wait_for_and_update_peer_seq(mdev, peer_seq);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002165 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisner12038a32011-11-09 19:18:00 +01002166 atomic_inc(&tconn->current_epoch->epoch_size);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002167 err2 = drbd_drain_block(mdev, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002168 if (!err)
2169 err = err2;
2170 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002171 }
2172
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01002173 /*
2174 * Corresponding put_ldev done either below (on various errors), or in
2175 * drbd_peer_request_endio, if we successfully submit the data at the
2176 * end of this function.
2177 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002178
2179 sector = be64_to_cpu(p->sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002180 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002181 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002182 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002183 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002184 }
2185
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002186 peer_req->w.cb = e_end_block;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002187
Lars Ellenberg688593c2010-11-17 22:25:03 +01002188 dp_flags = be32_to_cpu(p->dp_flags);
2189 rw |= wire_flags_to_bio(mdev, dp_flags);
Lars Ellenberg81a35372012-07-30 09:00:54 +02002190 if (peer_req->pages == NULL) {
2191 D_ASSERT(peer_req->i.size == 0);
Lars Ellenberga73ff322012-06-25 19:15:38 +02002192 D_ASSERT(dp_flags & DP_FLUSH);
2193 }
Lars Ellenberg688593c2010-11-17 22:25:03 +01002194
2195 if (dp_flags & DP_MAY_SET_IN_SYNC)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002196 peer_req->flags |= EE_MAY_SET_IN_SYNC;
Lars Ellenberg688593c2010-11-17 22:25:03 +01002197
Philipp Reisner12038a32011-11-09 19:18:00 +01002198 spin_lock(&tconn->epoch_lock);
2199 peer_req->epoch = tconn->current_epoch;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002200 atomic_inc(&peer_req->epoch->epoch_size);
2201 atomic_inc(&peer_req->epoch->active);
Philipp Reisner12038a32011-11-09 19:18:00 +01002202 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002203
Philipp Reisner302bdea2011-04-21 11:36:49 +02002204 rcu_read_lock();
2205 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2206 rcu_read_unlock();
2207 if (tp) {
2208 peer_req->flags |= EE_IN_INTERVAL_TREE;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002209 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2210 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002211 goto out_interrupted;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002212 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002213 err = handle_write_conflicts(mdev, peer_req);
2214 if (err) {
2215 spin_unlock_irq(&mdev->tconn->req_lock);
2216 if (err == -ENOENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002217 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002218 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002219 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002220 goto out_interrupted;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002221 }
Philipp Reisnerb874d232013-10-23 10:59:16 +02002222 } else {
2223 update_peer_seq(mdev, peer_seq);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002224 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb874d232013-10-23 10:59:16 +02002225 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002226 list_add(&peer_req->w.list, &mdev->active_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002227 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002228
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01002229 if (mdev->state.conn == C_SYNC_TARGET)
Philipp Reisner3ea35df2012-04-06 12:13:18 +02002230 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01002231
Philipp Reisner303d1442011-04-13 16:24:47 -07002232 if (mdev->tconn->agreed_pro_version < 100) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02002233 rcu_read_lock();
2234 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
Philipp Reisner303d1442011-04-13 16:24:47 -07002235 case DRBD_PROT_C:
2236 dp_flags |= DP_SEND_WRITE_ACK;
2237 break;
2238 case DRBD_PROT_B:
2239 dp_flags |= DP_SEND_RECEIVE_ACK;
2240 break;
2241 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002242 rcu_read_unlock();
Philipp Reisner303d1442011-04-13 16:24:47 -07002243 }
2244
2245 if (dp_flags & DP_SEND_WRITE_ACK) {
2246 peer_req->flags |= EE_SEND_WRITE_ACK;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002247 inc_unacked(mdev);
 2248		/* corresponding dec_unacked() in e_end_block()
 2249		 * or in _drbd_clear_done_ee(), respectively */
Philipp Reisner303d1442011-04-13 16:24:47 -07002250 }
2251
2252 if (dp_flags & DP_SEND_RECEIVE_ACK) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002253 /* I really don't like it that the receiver thread
2254 * sends on the msock, but anyways */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002255 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002256 }
2257
Lars Ellenberg6719fb02010-10-18 23:04:07 +02002258 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002259		/* In case we have the only disk of the cluster: the peer cannot store this write, mark the range out of sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002260 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2261 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2262 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
Lars Ellenberg56392d22013-03-19 18:16:48 +01002263 drbd_al_begin_io(mdev, &peer_req->i, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002264 }
2265
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002266 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2267 if (!err)
2268 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002269
Lars Ellenberg10f6d992011-01-24 14:47:09 +01002270 /* don't care for the reason here */
2271 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002272 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002273 list_del(&peer_req->w.list);
2274 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002275 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002276 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
Lars Ellenberg181286a2011-03-31 15:18:56 +02002277 drbd_al_complete_io(mdev, &peer_req->i);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002278
Philipp Reisnerb411b362009-09-25 16:07:19 -07002279out_interrupted:
Philipp Reisner1e9dd292011-11-10 15:14:53 +01002280 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002281 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002282 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002283 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284}
2285
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002286/* We may throttle resync, if the lower device seems to be busy,
2287 * and current sync rate is above c_min_rate.
2288 *
2289 * To decide whether or not the lower device is busy, we use a scheme similar
 2290 * to MD RAID's is_mddev_idle(): if the partition stats reveal a "significant"
 2291 * amount (more than 64 sectors) of activity that we cannot account for with
 2292 * our own resync activity, it obviously is "busy".
2293 *
2294 * The current sync rate used here uses only the most recent two step marks,
2295 * to have a short time average so we can react faster.
2296 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002297int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002298{
2299 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2300 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01002301 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002302 int curr_events;
2303 int throttle = 0;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002304 unsigned int c_min_rate;
2305
2306 rcu_read_lock();
2307 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2308 rcu_read_unlock();
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002309
2310 /* feature disabled? */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002311 if (c_min_rate == 0)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002312 return 0;
2313
Philipp Reisnere3555d82010-11-07 15:56:29 +01002314 spin_lock_irq(&mdev->al_lock);
2315 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2316 if (tmp) {
2317 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2318 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2319 spin_unlock_irq(&mdev->al_lock);
2320 return 0;
2321 }
2322 /* Do not slow down if app IO is already waiting for this extent */
2323 }
2324 spin_unlock_irq(&mdev->al_lock);
2325
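	/* curr_events: sectors read plus written on the backing device,
	 * minus what our own resync submitted (rs_sect_ev); anything beyond
	 * 64 sectors of difference counts as "significant" application IO. */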
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002326 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2327 (int)part_stat_read(&disk->part0, sectors[1]) -
2328 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01002329
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002330 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2331 unsigned long rs_left;
2332 int i;
2333
2334 mdev->rs_last_events = curr_events;
2335
 2336		/* sync speed, averaged over approximately the last
 2337		 * 2*DRBD_SYNC_MARK_STEP */
Lars Ellenberg2649f082010-11-05 10:05:47 +01002338 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2339
2340 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2341 rs_left = mdev->ov_left;
2342 else
2343 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002344
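		/* dt: seconds since the chosen step mark (at least 1),
		 * db: bits resynced since then, dbdt: the resulting rate in
		 * KiB/s, compared against the configured c_min_rate. */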
2345 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2346 if (!dt)
2347 dt++;
2348 db = mdev->rs_mark_left[i] - rs_left;
2349 dbdt = Bit2KB(db/dt);
2350
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002351 if (dbdt > c_min_rate)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002352 throttle = 1;
2353 }
2354 return throttle;
2355}
2356
2357
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002358static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002359{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002360 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002361 sector_t sector;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002362 sector_t capacity;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002363 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002364 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002365 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002366 unsigned int fault_type;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002367 struct p_block_req *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002368
2369 mdev = vnr_to_mdev(tconn, pi->vnr);
2370 if (!mdev)
2371 return -EIO;
2372 capacity = drbd_get_capacity(mdev->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002373
2374 sector = be64_to_cpu(p->sector);
2375 size = be32_to_cpu(p->blksize);
2376
Andreas Gruenbacherc670a392011-02-21 12:41:39 +01002377 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002378 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2379 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002380 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002381 }
2382 if (sector + (size>>9) > capacity) {
2383 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2384 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002385 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002386 }
2387
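	/* Without usable local data we cannot serve the request; answer with
	 * the matching negative ack and drain any payload that may still be
	 * in the socket so the data stream stays in sync. */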
2388 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002389 verb = 1;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002390 switch (pi->cmd) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002391 case P_DATA_REQUEST:
2392 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2393 break;
2394 case P_RS_DATA_REQUEST:
2395 case P_CSUM_RS_REQUEST:
2396 case P_OV_REQUEST:
2397 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2398 break;
2399 case P_OV_REPLY:
2400 verb = 0;
2401 dec_rs_pending(mdev);
2402 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2403 break;
2404 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002405 BUG();
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002406 }
2407 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002408 dev_err(DEV, "Can not satisfy peer's read request, "
2409 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002410
Lars Ellenberga821cc42010-09-06 12:31:37 +02002411		/* drain the possibly remaining payload */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002412 return drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002413 }
2414
2415 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2416 * "criss-cross" setup, that might cause write-out on some other DRBD,
2417 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02002418 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002419 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002420 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002421 return -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002422 }
2423
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002424 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002425 case P_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002426 peer_req->w.cb = w_e_end_data_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002427 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002428 /* application IO, don't drbd_rs_begin_io */
2429 goto submit;
2430
Philipp Reisnerb411b362009-09-25 16:07:19 -07002431 case P_RS_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002432 peer_req->w.cb = w_e_end_rsdata_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002433 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002434 /* used in the sector offset progress display */
2435 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002436 break;
2437
2438 case P_OV_REPLY:
2439 case P_CSUM_RS_REQUEST:
2440 fault_type = DRBD_FAULT_RS_RD;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002441 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002442 if (!di)
2443 goto out_free_e;
2444
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002445 di->digest_size = pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002446 di->digest = (((char *)di)+sizeof(struct digest_info));
2447
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002448 peer_req->digest = di;
2449 peer_req->flags |= EE_HAS_DIGEST;
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002450
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002451 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002452 goto out_free_e;
2453
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002454 if (pi->cmd == P_CSUM_RS_REQUEST) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002455 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002456 peer_req->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002457 /* used in the sector offset progress display */
2458 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002459 } else if (pi->cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002460 /* track progress, we may need to throttle */
2461 atomic_add(size >> 9, &mdev->rs_sect_in);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002462 peer_req->w.cb = w_e_end_ov_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002463 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002464 /* drbd_rs_begin_io done when we sent this request,
2465 * but accounting still needs to be done. */
2466 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002467 }
2468 break;
2469
2470 case P_OV_REQUEST:
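		/* First request of an online-verify run (ov_start_sector still
		 * unset): record the start position and reset the progress
		 * marks so rate/progress reporting starts from here. */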
Philipp Reisnerb411b362009-09-25 16:07:19 -07002471 if (mdev->ov_start_sector == ~(sector_t)0 &&
Philipp Reisner31890f42011-01-19 14:12:51 +01002472 mdev->tconn->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002473 unsigned long now = jiffies;
2474 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002475 mdev->ov_start_sector = sector;
2476 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002477 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2478 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002479 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2480 mdev->rs_mark_left[i] = mdev->ov_left;
2481 mdev->rs_mark_time[i] = now;
2482 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002483 dev_info(DEV, "Online Verify start sector: %llu\n",
2484 (unsigned long long)sector);
2485 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002486 peer_req->w.cb = w_e_end_ov_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002487 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002488 break;
2489
Philipp Reisnerb411b362009-09-25 16:07:19 -07002490 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002491 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002492 }
2493
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002494 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2495 * wrt the receiver, but it is not as straightforward as it may seem.
2496 * Various places in the resync start and stop logic assume resync
2497 * requests are processed in order, requeuing this on the worker thread
2498 * introduces a bunch of new code for synchronization between threads.
2499 *
2500 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2501 * "forever", throttling after drbd_rs_begin_io will lock that extent
2502 * for application writes for the same time. For now, just throttle
2503 * here, where the rest of the code expects the receiver to sleep for
2504 * a while, anyways.
2505 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002506
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002507 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2508 * this defers syncer requests for some time, before letting at least
 2509 * one request through. The resync controller on the receiving side
2510 * will adapt to the incoming rate accordingly.
2511 *
2512 * We cannot throttle here if remote is Primary/SyncTarget:
2513 * we would also throttle its application reads.
2514 * In that case, throttling is done on the SyncTarget only.
2515 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002516 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2517 schedule_timeout_uninterruptible(HZ/10);
2518 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002519 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002520
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002521submit_for_resync:
2522 atomic_add(size >> 9, &mdev->rs_sect_ev);
2523
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002524submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002525 inc_unacked(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002526 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002527 list_add_tail(&peer_req->w.list, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002528 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002529
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01002530 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002531 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002532
Lars Ellenberg10f6d992011-01-24 14:47:09 +01002533 /* don't care for the reason here */
2534 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002535 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002536 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002537 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002538 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2539
Philipp Reisnerb411b362009-09-25 16:07:19 -07002540out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002541 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002542 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002543 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002544}
2545
2546static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2547{
2548 int self, peer, rv = -100;
2549 unsigned long ch_self, ch_peer;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002550 enum drbd_after_sb_p after_sb_0p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002551
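	/* Return convention of the after-split-brain recovery helpers:
	 *    1  the peer's modifications are discarded (we become sync source)
	 *   -1  our modifications are discarded (we become sync target)
	 * -100  no automatic decision possible */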
2552 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2553 peer = mdev->p_uuid[UI_BITMAP] & 1;
2554
2555 ch_peer = mdev->p_uuid[UI_SIZE];
2556 ch_self = mdev->comm_bm_set;
2557
Philipp Reisner44ed1672011-04-19 17:10:19 +02002558 rcu_read_lock();
2559 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2560 rcu_read_unlock();
2561 switch (after_sb_0p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002562 case ASB_CONSENSUS:
2563 case ASB_DISCARD_SECONDARY:
2564 case ASB_CALL_HELPER:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002565 case ASB_VIOLENTLY:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002566 dev_err(DEV, "Configuration error.\n");
2567 break;
2568 case ASB_DISCONNECT:
2569 break;
2570 case ASB_DISCARD_YOUNGER_PRI:
2571 if (self == 0 && peer == 1) {
2572 rv = -1;
2573 break;
2574 }
2575 if (self == 1 && peer == 0) {
2576 rv = 1;
2577 break;
2578 }
2579 /* Else fall through to one of the other strategies... */
2580 case ASB_DISCARD_OLDER_PRI:
2581 if (self == 0 && peer == 1) {
2582 rv = 1;
2583 break;
2584 }
2585 if (self == 1 && peer == 0) {
2586 rv = -1;
2587 break;
2588 }
2589 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002590 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002591 "Using discard-least-changes instead\n");
2592 case ASB_DISCARD_ZERO_CHG:
2593 if (ch_peer == 0 && ch_self == 0) {
Lars Ellenberg427c0432012-08-01 12:43:01 +02002594 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002595 ? -1 : 1;
2596 break;
2597 } else {
2598 if (ch_peer == 0) { rv = 1; break; }
2599 if (ch_self == 0) { rv = -1; break; }
2600 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002601 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002602 break;
2603 case ASB_DISCARD_LEAST_CHG:
2604 if (ch_self < ch_peer)
2605 rv = -1;
2606 else if (ch_self > ch_peer)
2607 rv = 1;
2608 else /* ( ch_self == ch_peer ) */
2609 /* Well, then use something else. */
Lars Ellenberg427c0432012-08-01 12:43:01 +02002610 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002611 ? -1 : 1;
2612 break;
2613 case ASB_DISCARD_LOCAL:
2614 rv = -1;
2615 break;
2616 case ASB_DISCARD_REMOTE:
2617 rv = 1;
2618 }
2619
2620 return rv;
2621}
2622
2623static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2624{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002625 int hg, rv = -100;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002626 enum drbd_after_sb_p after_sb_1p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002627
Philipp Reisner44ed1672011-04-19 17:10:19 +02002628 rcu_read_lock();
2629 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2630 rcu_read_unlock();
2631 switch (after_sb_1p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002632 case ASB_DISCARD_YOUNGER_PRI:
2633 case ASB_DISCARD_OLDER_PRI:
2634 case ASB_DISCARD_LEAST_CHG:
2635 case ASB_DISCARD_LOCAL:
2636 case ASB_DISCARD_REMOTE:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002637 case ASB_DISCARD_ZERO_CHG:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002638 dev_err(DEV, "Configuration error.\n");
2639 break;
2640 case ASB_DISCONNECT:
2641 break;
2642 case ASB_CONSENSUS:
2643 hg = drbd_asb_recover_0p(mdev);
2644 if (hg == -1 && mdev->state.role == R_SECONDARY)
2645 rv = hg;
2646 if (hg == 1 && mdev->state.role == R_PRIMARY)
2647 rv = hg;
2648 break;
2649 case ASB_VIOLENTLY:
2650 rv = drbd_asb_recover_0p(mdev);
2651 break;
2652 case ASB_DISCARD_SECONDARY:
2653 return mdev->state.role == R_PRIMARY ? 1 : -1;
2654 case ASB_CALL_HELPER:
2655 hg = drbd_asb_recover_0p(mdev);
2656 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002657 enum drbd_state_rv rv2;
2658
Philipp Reisnerb411b362009-09-25 16:07:19 -07002659 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2660 * we might be here in C_WF_REPORT_PARAMS which is transient.
2661 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002662 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2663 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002664 drbd_khelper(mdev, "pri-lost-after-sb");
2665 } else {
2666 dev_warn(DEV, "Successfully gave up primary role.\n");
2667 rv = hg;
2668 }
2669 } else
2670 rv = hg;
2671 }
2672
2673 return rv;
2674}
2675
2676static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2677{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002678 int hg, rv = -100;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002679 enum drbd_after_sb_p after_sb_2p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002680
Philipp Reisner44ed1672011-04-19 17:10:19 +02002681 rcu_read_lock();
2682 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2683 rcu_read_unlock();
2684 switch (after_sb_2p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002685 case ASB_DISCARD_YOUNGER_PRI:
2686 case ASB_DISCARD_OLDER_PRI:
2687 case ASB_DISCARD_LEAST_CHG:
2688 case ASB_DISCARD_LOCAL:
2689 case ASB_DISCARD_REMOTE:
2690 case ASB_CONSENSUS:
2691 case ASB_DISCARD_SECONDARY:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002692 case ASB_DISCARD_ZERO_CHG:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002693 dev_err(DEV, "Configuration error.\n");
2694 break;
2695 case ASB_VIOLENTLY:
2696 rv = drbd_asb_recover_0p(mdev);
2697 break;
2698 case ASB_DISCONNECT:
2699 break;
2700 case ASB_CALL_HELPER:
2701 hg = drbd_asb_recover_0p(mdev);
2702 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002703 enum drbd_state_rv rv2;
2704
Philipp Reisnerb411b362009-09-25 16:07:19 -07002705 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2706 * we might be here in C_WF_REPORT_PARAMS which is transient.
2707 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002708 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2709 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002710 drbd_khelper(mdev, "pri-lost-after-sb");
2711 } else {
2712 dev_warn(DEV, "Successfully gave up primary role.\n");
2713 rv = hg;
2714 }
2715 } else
2716 rv = hg;
2717 }
2718
2719 return rv;
2720}
2721
2722static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2723 u64 bits, u64 flags)
2724{
2725 if (!uuid) {
2726 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2727 return;
2728 }
2729 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2730 text,
2731 (unsigned long long)uuid[UI_CURRENT],
2732 (unsigned long long)uuid[UI_BITMAP],
2733 (unsigned long long)uuid[UI_HISTORY_START],
2734 (unsigned long long)uuid[UI_HISTORY_END],
2735 (unsigned long long)bits,
2736 (unsigned long long)flags);
2737}
2738
2739/*
2740 100 after split brain try auto recover
2741 2 C_SYNC_SOURCE set BitMap
2742 1 C_SYNC_SOURCE use BitMap
2743 0 no Sync
2744 -1 C_SYNC_TARGET use BitMap
2745 -2 C_SYNC_TARGET set BitMap
2746 -100 after split brain, disconnect
2747-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002748-1091 requires proto 91
2749-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002750 */
2751static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2752{
2753 u64 self, peer;
2754 int i, j;
2755
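	/* Compare the current generation UUIDs of both nodes; the lowest bit
	 * is masked off so only the generation value itself is compared. */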
2756 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2757 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2758
2759 *rule_nr = 10;
2760 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2761 return 0;
2762
2763 *rule_nr = 20;
2764 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2765 peer != UUID_JUST_CREATED)
2766 return -2;
2767
2768 *rule_nr = 30;
2769 if (self != UUID_JUST_CREATED &&
2770 (peer == UUID_JUST_CREATED || peer == (u64)0))
2771 return 2;
2772
2773 if (self == peer) {
2774 int rct, dc; /* roles at crash time */
2775
2776 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2777
Philipp Reisner31890f42011-01-19 14:12:51 +01002778 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002779 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002780
2781 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2782 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2783 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002784 drbd_uuid_move_history(mdev);
2785 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2786 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002787
2788 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2789 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2790 *rule_nr = 34;
2791 } else {
2792 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2793 *rule_nr = 36;
2794 }
2795
2796 return 1;
2797 }
2798
2799 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2800
Philipp Reisner31890f42011-01-19 14:12:51 +01002801 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002802 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002803
2804 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2805 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2806 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2807
2808 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2809 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2810 mdev->p_uuid[UI_BITMAP] = 0UL;
2811
2812 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2813 *rule_nr = 35;
2814 } else {
2815 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2816 *rule_nr = 37;
2817 }
2818
2819 return -1;
2820 }
2821
2822 /* Common power [off|failure] */
2823 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2824 (mdev->p_uuid[UI_FLAGS] & 2);
2825 /* lowest bit is set when we were primary,
2826 * next bit (weight 2) is set when peer was primary */
2827 *rule_nr = 40;
2828
2829 switch (rct) {
2830 case 0: /* !self_pri && !peer_pri */ return 0;
2831 case 1: /* self_pri && !peer_pri */ return 1;
2832 case 2: /* !self_pri && peer_pri */ return -1;
2833 case 3: /* self_pri && peer_pri */
Lars Ellenberg427c0432012-08-01 12:43:01 +02002834 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002835 return dc ? -1 : 1;
2836 }
2837 }
2838
2839 *rule_nr = 50;
2840 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2841 if (self == peer)
2842 return -1;
2843
2844 *rule_nr = 51;
2845 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2846 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002847 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002848 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2849 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2850 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002851			/* The last P_SYNC_UUID did not get through. Undo the modifications
 2852			   of the peer's UUIDs made at the last start of resync as sync source. */
2853
Philipp Reisner31890f42011-01-19 14:12:51 +01002854 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002855 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002856
2857 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2858 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002859
Lars Ellenberg92b4ca22012-04-30 12:53:52 +02002860 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
Philipp Reisner4a23f262011-01-11 17:42:17 +01002861 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2862
Philipp Reisnerb411b362009-09-25 16:07:19 -07002863 return -1;
2864 }
2865 }
2866
2867 *rule_nr = 60;
2868 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2869 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2870 peer = mdev->p_uuid[i] & ~((u64)1);
2871 if (self == peer)
2872 return -2;
2873 }
2874
2875 *rule_nr = 70;
2876 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2877 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2878 if (self == peer)
2879 return 1;
2880
2881 *rule_nr = 71;
2882 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2883 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002884 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002885 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2886 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2887 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002888			/* The last P_SYNC_UUID did not get through. Undo the modifications
 2889			   of our UUIDs made at the last start of resync as sync source. */
2890
Philipp Reisner31890f42011-01-19 14:12:51 +01002891 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002892 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002893
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002894 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2895 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002896
Philipp Reisner4a23f262011-01-11 17:42:17 +01002897 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002898 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2899 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2900
2901 return 1;
2902 }
2903 }
2904
2905
2906 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002907 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002908 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2909 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2910 if (self == peer)
2911 return 2;
2912 }
2913
2914 *rule_nr = 90;
2915 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2916 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2917 if (self == peer && self != ((u64)0))
2918 return 100;
2919
2920 *rule_nr = 100;
2921 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2922 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2923 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2924 peer = mdev->p_uuid[j] & ~((u64)1);
2925 if (self == peer)
2926 return -100;
2927 }
2928 }
2929
2930 return -1000;
2931}
2932
2933/* drbd_sync_handshake() returns the new conn state on success, or
2934 CONN_MASK (-1) on failure.
2935 */
2936static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2937 enum drbd_disk_state peer_disk) __must_hold(local)
2938{
Philipp Reisnerb411b362009-09-25 16:07:19 -07002939 enum drbd_conns rv = C_MASK;
2940 enum drbd_disk_state mydisk;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002941 struct net_conf *nc;
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02002942 int hg, rule_nr, rr_conflict, tentative;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002943
2944 mydisk = mdev->state.disk;
2945 if (mydisk == D_NEGOTIATING)
2946 mydisk = mdev->new_state_tmp.disk;
2947
2948 dev_info(DEV, "drbd_sync_handshake:\n");
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002949
2950 spin_lock_irq(&mdev->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002951 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2952 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2953 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2954
2955 hg = drbd_uuid_compare(mdev, &rule_nr);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002956 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002957
2958 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2959
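	/* hg follows the table above drbd_uuid_compare(): 0 no sync,
	 * +/-1 bitmap based sync, +/-2 full sync, +/-100 split brain,
	 * <= -1000 unrelated data or a peer protocol that is too old. */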
2960 if (hg == -1000) {
2961 dev_alert(DEV, "Unrelated data, aborting!\n");
2962 return C_MASK;
2963 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002964 if (hg < -1000) {
2965 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002966 return C_MASK;
2967 }
2968
2969 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2970 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2971 int f = (hg == -100) || abs(hg) == 2;
2972 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2973 if (f)
2974 hg = hg*2;
2975 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2976 hg > 0 ? "source" : "target");
2977 }
2978
Adam Gandelman3a11a482010-04-08 16:48:23 -07002979 if (abs(hg) == 100)
2980 drbd_khelper(mdev, "initial-split-brain");
2981
Philipp Reisner44ed1672011-04-19 17:10:19 +02002982 rcu_read_lock();
2983 nc = rcu_dereference(mdev->tconn->net_conf);
2984
2985 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002986 int pcount = (mdev->state.role == R_PRIMARY)
2987 + (peer_role == R_PRIMARY);
2988 int forced = (hg == -100);
2989
2990 switch (pcount) {
2991 case 0:
2992 hg = drbd_asb_recover_0p(mdev);
2993 break;
2994 case 1:
2995 hg = drbd_asb_recover_1p(mdev);
2996 break;
2997 case 2:
2998 hg = drbd_asb_recover_2p(mdev);
2999 break;
3000 }
3001 if (abs(hg) < 100) {
3002 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3003 "automatically solved. Sync from %s node\n",
3004 pcount, (hg < 0) ? "peer" : "this");
3005 if (forced) {
3006 dev_warn(DEV, "Doing a full sync, since"
 3007					" UUIDs were ambiguous.\n");
3008 hg = hg*2;
3009 }
3010 }
3011 }
3012
3013 if (hg == -100) {
Philipp Reisner08b165b2011-09-05 16:22:33 +02003014 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003015 hg = -1;
Philipp Reisner08b165b2011-09-05 16:22:33 +02003016 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003017 hg = 1;
3018
3019 if (abs(hg) < 100)
3020 dev_warn(DEV, "Split-Brain detected, manually solved. "
3021 "Sync from %s node\n",
3022 (hg < 0) ? "peer" : "this");
3023 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02003024 rr_conflict = nc->rr_conflict;
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02003025 tentative = nc->tentative;
Philipp Reisner44ed1672011-04-19 17:10:19 +02003026 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003027
3028 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01003029 /* FIXME this log message is not correct if we end up here
3030 * after an attempted attach on a diskless node.
3031 * We just refuse to attach -- well, we drop the "connection"
3032 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07003033 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034 drbd_khelper(mdev, "split-brain");
3035 return C_MASK;
3036 }
3037
3038 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3039 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3040 return C_MASK;
3041 }
3042
3043 if (hg < 0 && /* by intention we do not use mydisk here. */
3044 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02003045 switch (rr_conflict) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003046 case ASB_CALL_HELPER:
3047 drbd_khelper(mdev, "pri-lost");
3048 /* fall through */
3049 case ASB_DISCONNECT:
3050 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3051 return C_MASK;
3052 case ASB_VIOLENTLY:
3053 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
 3054				" assumption\n");
3055 }
3056 }
3057
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02003058 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003059 if (hg == 0)
3060 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3061 else
3062 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3063 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3064 abs(hg) >= 2 ? "full" : "bit-map based");
3065 return C_MASK;
3066 }
3067
Philipp Reisnerb411b362009-09-25 16:07:19 -07003068 if (abs(hg) >= 2) {
3069 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003070 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3071 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003072 return C_MASK;
3073 }
3074
3075 if (hg > 0) { /* become sync source. */
3076 rv = C_WF_BITMAP_S;
3077 } else if (hg < 0) { /* become sync target */
3078 rv = C_WF_BITMAP_T;
3079 } else {
3080 rv = C_CONNECTED;
3081 if (drbd_bm_total_weight(mdev)) {
3082 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3083 drbd_bm_total_weight(mdev));
3084 }
3085 }
3086
3087 return rv;
3088}
3089
Philipp Reisnerf179d762011-05-16 17:31:47 +02003090static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003091{
3092 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003093 if (peer == ASB_DISCARD_REMOTE)
3094 return ASB_DISCARD_LOCAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003095
3096 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003097 if (peer == ASB_DISCARD_LOCAL)
3098 return ASB_DISCARD_REMOTE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003099
3100 /* everything else is valid if they are equal on both sides. */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003101 return peer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003102}
3103
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003104static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003105{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003106 struct p_protocol *p = pi->data;
Philipp Reisner036b17e2011-05-16 17:38:11 +02003107 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3108 int p_proto, p_discard_my_data, p_two_primaries, cf;
3109 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3110 char integrity_alg[SHARED_SECRET_MAX] = "";
Andreas Gruenbacheraccdbcc2011-07-15 17:41:09 +02003111 struct crypto_hash *peer_integrity_tfm = NULL;
Philipp Reisner7aca6c72011-05-17 10:12:56 +02003112 void *int_dig_in = NULL, *int_dig_vv = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003113
Philipp Reisnerb411b362009-09-25 16:07:19 -07003114 p_proto = be32_to_cpu(p->protocol);
3115 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3116 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3117 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003118 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003119 cf = be32_to_cpu(p->conn_flags);
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02003120 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003121
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003122 if (tconn->agreed_pro_version >= 87) {
3123 int err;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003124
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02003125 if (pi->size > sizeof(integrity_alg))
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003126 return -EIO;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02003127 err = drbd_recv_all(tconn, integrity_alg, pi->size);
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003128 if (err)
3129 return err;
Philipp Reisner036b17e2011-05-16 17:38:11 +02003130 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003131 }
3132
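	/* For a plain P_PROTOCOL packet the peer's settings must match our own
	 * configuration; only P_PROTOCOL_UPDATE may legitimately change them
	 * at runtime, so the compatibility checks below are skipped for it. */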
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003133 if (pi->cmd != P_PROTOCOL_UPDATE) {
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003134 clear_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisner036b17e2011-05-16 17:38:11 +02003135
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003136 if (cf & CF_DRY_RUN)
3137 set_bit(CONN_DRY_RUN, &tconn->flags);
3138
3139 rcu_read_lock();
3140 nc = rcu_dereference(tconn->net_conf);
3141
3142 if (p_proto != nc->wire_protocol) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003143 conn_err(tconn, "incompatible %s settings\n", "protocol");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003144 goto disconnect_rcu_unlock;
3145 }
3146
3147 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003148 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003149 goto disconnect_rcu_unlock;
3150 }
3151
3152 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003153 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003154 goto disconnect_rcu_unlock;
3155 }
3156
3157 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003158 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003159 goto disconnect_rcu_unlock;
3160 }
3161
3162 if (p_discard_my_data && nc->discard_my_data) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003163 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003164 goto disconnect_rcu_unlock;
3165 }
3166
3167 if (p_two_primaries != nc->two_primaries) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003168 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003169 goto disconnect_rcu_unlock;
3170 }
3171
3172 if (strcmp(integrity_alg, nc->integrity_alg)) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003173 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003174 goto disconnect_rcu_unlock;
3175 }
3176
3177 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003178 }
3179
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003180 if (integrity_alg[0]) {
3181 int hash_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003182
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003183 /*
3184 * We can only change the peer data integrity algorithm
3185 * here. Changing our own data integrity algorithm
3186 * requires that we send a P_PROTOCOL_UPDATE packet at
3187 * the same time; otherwise, the peer has no way to
3188 * tell between which packets the algorithm should
3189 * change.
3190 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003191
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003192 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3193 if (!peer_integrity_tfm) {
3194 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3195 integrity_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003196 goto disconnect;
3197 }
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003198
3199 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3200 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3201 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3202 if (!(int_dig_in && int_dig_vv)) {
3203 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3204 goto disconnect;
3205 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003206 }
3207
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003208 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3209 if (!new_net_conf) {
3210 conn_err(tconn, "Allocation of new net_conf failed\n");
3211 goto disconnect;
3212 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003213
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003214 mutex_lock(&tconn->data.mutex);
3215 mutex_lock(&tconn->conf_update);
3216 old_net_conf = tconn->net_conf;
3217 *new_net_conf = *old_net_conf;
3218
3219 new_net_conf->wire_protocol = p_proto;
3220 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3221 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3222 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3223 new_net_conf->two_primaries = p_two_primaries;
3224
3225 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3226 mutex_unlock(&tconn->conf_update);
3227 mutex_unlock(&tconn->data.mutex);
3228
3229 crypto_free_hash(tconn->peer_integrity_tfm);
3230 kfree(tconn->int_dig_in);
3231 kfree(tconn->int_dig_vv);
3232 tconn->peer_integrity_tfm = peer_integrity_tfm;
3233 tconn->int_dig_in = int_dig_in;
3234 tconn->int_dig_vv = int_dig_vv;
3235
3236 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3237 conn_info(tconn, "peer data-integrity-alg: %s\n",
3238 integrity_alg[0] ? integrity_alg : "(none)");
3239
3240 synchronize_rcu();
3241 kfree(old_net_conf);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003242 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003243
Philipp Reisner44ed1672011-04-19 17:10:19 +02003244disconnect_rcu_unlock:
3245 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003246disconnect:
Andreas Gruenbacherb792c352011-07-15 16:48:49 +02003247 crypto_free_hash(peer_integrity_tfm);
Philipp Reisner036b17e2011-05-16 17:38:11 +02003248 kfree(int_dig_in);
3249 kfree(int_dig_vv);
Philipp Reisner72046242011-03-15 18:51:47 +01003250 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003251 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003252}
3253
3254/* helper function
3255 * input: alg name, feature name
3256 * return: NULL (alg name was "")
3257 * ERR_PTR(error) if something goes wrong
3258 * or the crypto hash ptr, if it worked out ok. */
3259struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3260 const char *alg, const char *name)
3261{
3262 struct crypto_hash *tfm;
3263
3264 if (!alg[0])
3265 return NULL;
3266
3267 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3268 if (IS_ERR(tfm)) {
3269 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3270 alg, name, PTR_ERR(tfm));
3271 return tfm;
3272 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003273 return tfm;
3274}
3275
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003276static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003277{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003278 void *buffer = tconn->data.rbuf;
3279 int size = pi->size;
3280
3281 while (size) {
3282 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3283 s = drbd_recv(tconn, buffer, s);
3284 if (s <= 0) {
3285 if (s < 0)
3286 return s;
3287 break;
3288 }
3289 size -= s;
3290 }
3291 if (size)
3292 return -EIO;
3293 return 0;
3294}
3295
3296/*
3297 * config_unknown_volume - device configuration command for unknown volume
3298 *
3299 * When a device is added to an existing connection, the node on which the
3300 * device is added first will send configuration commands to its peer but the
3301 * peer will not know about the device yet. It will warn and ignore these
3302 * commands. Once the device is added on the second node, the second node will
3303 * send the same device configuration commands, but in the other direction.
3304 *
3305 * (We can also end up here if drbd is misconfigured.)
3306 */
3307static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3308{
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02003309 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3310 cmdname(pi->cmd), pi->vnr);
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003311 return ignore_remaining_packet(tconn, pi);
3312}
3313
3314static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3315{
3316 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003317 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003318 unsigned int header_size, data_size, exp_max_sz;
3319 struct crypto_hash *verify_tfm = NULL;
3320 struct crypto_hash *csums_tfm = NULL;
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003321 struct net_conf *old_net_conf, *new_net_conf = NULL;
Philipp Reisner813472c2011-05-03 16:47:02 +02003322 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003323 const int apv = tconn->agreed_pro_version;
Philipp Reisner813472c2011-05-03 16:47:02 +02003324 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Philipp Reisner778f2712010-07-06 11:14:00 +02003325 int fifo_size = 0;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003326 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003327
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003328 mdev = vnr_to_mdev(tconn, pi->vnr);
3329 if (!mdev)
3330 return config_unknown_volume(tconn, pi);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003331
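	/* The SyncParam packet grew over the protocol generations; compute the
	 * largest size this peer's protocol version may legitimately send. */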
3332 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3333 : apv == 88 ? sizeof(struct p_rs_param)
3334 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003335 : apv <= 94 ? sizeof(struct p_rs_param_89)
3336 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003337
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003338 if (pi->size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003339 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003340 pi->size, exp_max_sz);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003341 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003342 }
3343
3344 if (apv <= 88) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003345 header_size = sizeof(struct p_rs_param);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003346 data_size = pi->size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003347 } else if (apv <= 94) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003348 header_size = sizeof(struct p_rs_param_89);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003349 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003350 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003351 } else {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003352 header_size = sizeof(struct p_rs_param_95);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003353 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003354 D_ASSERT(data_size == 0);
3355 }
3356
3357 /* initialize verify_alg and csums_alg */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003358 p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003359 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3360
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003361 err = drbd_recv_all(mdev->tconn, p, header_size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003362 if (err)
3363 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003364
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003365 mutex_lock(&mdev->tconn->conf_update);
3366 old_net_conf = mdev->tconn->net_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02003367 if (get_ldev(mdev)) {
3368 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3369 if (!new_disk_conf) {
3370 put_ldev(mdev);
3371 mutex_unlock(&mdev->tconn->conf_update);
3372 dev_err(DEV, "Allocation of new disk_conf failed\n");
3373 return -ENOMEM;
3374 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003375
Philipp Reisner813472c2011-05-03 16:47:02 +02003376 old_disk_conf = mdev->ldev->disk_conf;
3377 *new_disk_conf = *old_disk_conf;
3378
Andreas Gruenbacher6394b932011-05-11 14:29:52 +02003379 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
Philipp Reisner813472c2011-05-03 16:47:02 +02003380 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003381
3382 if (apv >= 88) {
3383 if (apv == 88) {
Philipp Reisner5de73822012-03-28 10:17:32 +02003384 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3385 dev_err(DEV, "verify-alg of wrong size, "
 3386				"peer wants %u, accepting only up to %u bytes\n",
3387 data_size, SHARED_SECRET_MAX);
Philipp Reisner813472c2011-05-03 16:47:02 +02003388 err = -EIO;
3389 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003390 }
3391
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003392 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
Philipp Reisner813472c2011-05-03 16:47:02 +02003393 if (err)
3394 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003395 /* we expect NUL terminated string */
3396 /* but just in case someone tries to be evil */
3397 D_ASSERT(p->verify_alg[data_size-1] == 0);
3398 p->verify_alg[data_size-1] = 0;
3399
3400 } else /* apv >= 89 */ {
3401 /* we still expect NUL terminated strings */
3402 /* but just in case someone tries to be evil */
3403 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3404 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3405 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3406 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3407 }
3408
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003409 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003410 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3411 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003412 old_net_conf->verify_alg, p->verify_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003413 goto disconnect;
3414 }
3415 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3416 p->verify_alg, "verify-alg");
3417 if (IS_ERR(verify_tfm)) {
3418 verify_tfm = NULL;
3419 goto disconnect;
3420 }
3421 }
3422
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003423 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003424 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3425 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003426 old_net_conf->csums_alg, p->csums_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003427 goto disconnect;
3428 }
3429 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3430 p->csums_alg, "csums-alg");
3431 if (IS_ERR(csums_tfm)) {
3432 csums_tfm = NULL;
3433 goto disconnect;
3434 }
3435 }
3436
Philipp Reisner813472c2011-05-03 16:47:02 +02003437 if (apv > 94 && new_disk_conf) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003438 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3439 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3440 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3441 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02003442
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003443 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
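			/* Illustrative arithmetic (assuming SLEEP_TIME is HZ/10, its
			 * usual definition elsewhere in drbd): c_plan_ahead is given
			 * in tenths of a second, so c_plan_ahead = 20 yields
			 * fifo_size = 20 * 10 * (HZ/10) / HZ = 20 slots, i.e. one
			 * slot per 100 ms planning step over a 2 second window. */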
Philipp Reisner9958c852011-05-03 16:19:31 +02003444 if (fifo_size != mdev->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02003445 new_plan = fifo_alloc(fifo_size);
3446 if (!new_plan) {
Philipp Reisner778f2712010-07-06 11:14:00 +02003447 dev_err(DEV, "kmalloc of fifo_buffer failed\n");
Lars Ellenbergf3990022011-03-23 14:31:09 +01003448 put_ldev(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02003449 goto disconnect;
3450 }
3451 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003452 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003453
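		/* Only when at least one algorithm actually changed do we allocate
		 * a new net_conf, copy the old one, patch in the new algorithm
		 * names and transforms, and publish it with rcu_assign_pointer()
		 * below, so readers under rcu_read_lock() always see a consistent
		 * snapshot. */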
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003454 if (verify_tfm || csums_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003455 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3456 if (!new_net_conf) {
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003457 dev_err(DEV, "Allocation of new net_conf failed\n");
3458 goto disconnect;
3459 }
3460
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003461 *new_net_conf = *old_net_conf;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003462
3463 if (verify_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003464 strcpy(new_net_conf->verify_alg, p->verify_alg);
3465 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003466 crypto_free_hash(mdev->tconn->verify_tfm);
3467 mdev->tconn->verify_tfm = verify_tfm;
3468 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3469 }
3470 if (csums_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003471 strcpy(new_net_conf->csums_alg, p->csums_alg);
3472 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003473 crypto_free_hash(mdev->tconn->csums_tfm);
3474 mdev->tconn->csums_tfm = csums_tfm;
3475 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3476 }
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003477 rcu_assign_pointer(tconn->net_conf, new_net_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003478 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003479 }
3480
Philipp Reisner813472c2011-05-03 16:47:02 +02003481 if (new_disk_conf) {
3482 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3483 put_ldev(mdev);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003484 }
Philipp Reisner813472c2011-05-03 16:47:02 +02003485
3486 if (new_plan) {
3487 old_plan = mdev->rs_plan_s;
3488 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3489 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003490
3491 mutex_unlock(&mdev->tconn->conf_update);
3492 synchronize_rcu();
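	/* After synchronize_rcu() no reader can still be using the old
	 * configuration objects, so it is safe to free them now. */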
3493 if (new_net_conf)
3494 kfree(old_net_conf);
3495 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02003496 kfree(old_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003497
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003498 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003499
Philipp Reisner813472c2011-05-03 16:47:02 +02003500reconnect:
3501 if (new_disk_conf) {
3502 put_ldev(mdev);
3503 kfree(new_disk_conf);
3504 }
3505 mutex_unlock(&mdev->tconn->conf_update);
3506 return -EIO;
3507
Philipp Reisnerb411b362009-09-25 16:07:19 -07003508disconnect:
Philipp Reisner813472c2011-05-03 16:47:02 +02003509 kfree(new_plan);
3510 if (new_disk_conf) {
3511 put_ldev(mdev);
3512 kfree(new_disk_conf);
3513 }
Philipp Reisnera0095502011-05-03 13:14:15 +02003514 mutex_unlock(&mdev->tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003515 /* just for completeness: actually not needed,
3516 * as this is not reached if csums_tfm was ok. */
3517 crypto_free_hash(csums_tfm);
3518 /* but free the verify_tfm again, if csums_tfm did not work out */
3519 crypto_free_hash(verify_tfm);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003520 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003521 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003522}
3523
Philipp Reisnerb411b362009-09-25 16:07:19 -07003524/* warn if the arguments differ by more than 12.5% */
3525static void warn_if_differ_considerably(struct drbd_conf *mdev,
3526 const char *s, sector_t a, sector_t b)
3527{
3528 sector_t d;
3529 if (a == 0 || b == 0)
3530 return;
3531 d = (a > b) ? (a - b) : (b - a);
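	/* d > a>>3 means the difference exceeds a/8, i.e. 12.5% of a */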
3532 if (d > (a>>3) || d > (b>>3))
3533 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3534 (unsigned long long)a, (unsigned long long)b);
3535}
3536
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003537static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003538{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003539 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003540 struct p_sizes *p = pi->data;
Philipp Reisnere96c9632013-06-25 16:50:07 +02003541 enum determine_dev_size dd = DS_UNCHANGED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003542 sector_t p_size, p_usize, my_usize;
3543 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003544 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003545
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003546 mdev = vnr_to_mdev(tconn, pi->vnr);
3547 if (!mdev)
3548 return config_unknown_volume(tconn, pi);
3549
Philipp Reisnerb411b362009-09-25 16:07:19 -07003550 p_size = be64_to_cpu(p->d_size);
3551 p_usize = be64_to_cpu(p->u_size);
3552
Philipp Reisnerb411b362009-09-25 16:07:19 -07003553 /* just store the peer's disk size for now.
3554 * we still need to figure out whether we accept that. */
3555 mdev->p_size = p_size;
3556
Philipp Reisnerb411b362009-09-25 16:07:19 -07003557 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003558 rcu_read_lock();
3559 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3560 rcu_read_unlock();
3561
Philipp Reisnerb411b362009-09-25 16:07:19 -07003562 warn_if_differ_considerably(mdev, "lower level device sizes",
3563 p_size, drbd_get_max_capacity(mdev->ldev));
3564 warn_if_differ_considerably(mdev, "user requested size",
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003565 p_usize, my_usize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003566
3567 /* if this is the first connect, or an otherwise expected
3568 * param exchange, choose the minimum */
3569 if (mdev->state.conn == C_WF_REPORT_PARAMS)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003570 p_usize = min_not_zero(my_usize, p_usize);
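		/* e.g. a local user size limit of 0 (unset) and a peer limit of
		 * 100 GiB yields 100 GiB; two non-zero limits yield the smaller one. */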
Philipp Reisnerb411b362009-09-25 16:07:19 -07003571
3572 /* Never shrink a device with usable data during connect.
3573 But allow online shrinking if we are connected. */
Philipp Reisneref5e44a2011-05-03 13:27:43 +02003574 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003575 drbd_get_capacity(mdev->this_bdev) &&
3576 mdev->state.disk >= D_OUTDATED &&
3577 mdev->state.conn < C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578 dev_err(DEV, "The peer's disk size is too small!\n");
Philipp Reisner38fa9982011-03-15 18:24:49 +01003579 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003580 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003581 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003582 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003583
3584 if (my_usize != p_usize) {
3585 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3586
3587 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3588 if (!new_disk_conf) {
3589 dev_err(DEV, "Allocation of new disk_conf failed\n");
3590 put_ldev(mdev);
3591 return -ENOMEM;
3592 }
3593
3594 mutex_lock(&mdev->tconn->conf_update);
3595 old_disk_conf = mdev->ldev->disk_conf;
3596 *new_disk_conf = *old_disk_conf;
3597 new_disk_conf->disk_size = p_usize;
3598
3599 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3600 mutex_unlock(&mdev->tconn->conf_update);
3601 synchronize_rcu();
3602 kfree(old_disk_conf);
3603
3604 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3605 (unsigned long)p_usize);
3606 }
3607
Philipp Reisnerb411b362009-09-25 16:07:19 -07003608 put_ldev(mdev);
3609 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003610
Philipp Reisnere89b5912010-03-24 17:11:33 +01003611 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003612 if (get_ldev(mdev)) {
Philipp Reisnerd752b262013-06-25 16:50:08 +02003613 dd = drbd_determine_dev_size(mdev, ddsf, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614 put_ldev(mdev);
Philipp Reisnere96c9632013-06-25 16:50:07 +02003615 if (dd == DS_ERROR)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003616 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003617 drbd_md_sync(mdev);
3618 } else {
3619 /* I am diskless, need to accept the peer's size. */
3620 drbd_set_my_capacity(mdev, p_size);
3621 }
3622
Philipp Reisner99432fc2011-05-20 16:39:13 +02003623 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3624 drbd_reconsider_max_bio_size(mdev);
3625
Philipp Reisnerb411b362009-09-25 16:07:19 -07003626 if (get_ldev(mdev)) {
3627 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3628 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3629 ldsc = 1;
3630 }
3631
Philipp Reisnerb411b362009-09-25 16:07:19 -07003632 put_ldev(mdev);
3633 }
3634
3635 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3636 if (be64_to_cpu(p->c_size) !=
3637 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3638 /* we have different sizes, probably peer
3639 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003640 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003641 }
3642 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
Philipp Reisnere96c9632013-06-25 16:50:07 +02003643 (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003644 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003645 mdev->state.disk >= D_INCONSISTENT) {
3646 if (ddsf & DDSF_NO_RESYNC)
3647 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3648 else
3649 resync_after_online_grow(mdev);
3650 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003651 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3652 }
3653 }
3654
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003655 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003656}
3657
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003658static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003659{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003660 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003661 struct p_uuids *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003662 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003663 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003664
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003665 mdev = vnr_to_mdev(tconn, pi->vnr);
3666 if (!mdev)
3667 return config_unknown_volume(tconn, pi);
3668
Philipp Reisnerb411b362009-09-25 16:07:19 -07003669 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
Jing Wang063eacf2012-10-25 15:00:56 +08003670 if (!p_uuid) {
3671 dev_err(DEV, "kmalloc of p_uuid failed\n");
3672 return -ENOMEM;
3673 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003674
3675 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3676 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3677
3678 kfree(mdev->p_uuid);
3679 mdev->p_uuid = p_uuid;
3680
3681 if (mdev->state.conn < C_CONNECTED &&
3682 mdev->state.disk < D_INCONSISTENT &&
3683 mdev->state.role == R_PRIMARY &&
3684 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3685 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3686 (unsigned long long)mdev->ed_uuid);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003687 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003688 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003689 }
3690
3691 if (get_ldev(mdev)) {
3692 int skip_initial_sync =
3693 mdev->state.conn == C_CONNECTED &&
Philipp Reisner31890f42011-01-19 14:12:51 +01003694 mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003695 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3696 (p_uuid[UI_FLAGS] & 8);
3697 if (skip_initial_sync) {
3698 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3699 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003700 "clear_n_write from receive_uuids",
3701 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003702 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3703 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3704 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3705 CS_VERBOSE, NULL);
3706 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003707 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003708 }
3709 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003710 } else if (mdev->state.disk < D_INCONSISTENT &&
3711 mdev->state.role == R_PRIMARY) {
3712 /* I am a diskless primary, the peer just created a new current UUID
3713 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003714 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003715 }
3716
3717 /* Before we test the disk state, we should wait until any possibly
3718 ongoing cluster-wide state change has finished. That is important if
3719 we are primary and are detaching from our disk. We need to see the
3720 new disk state... */
Philipp Reisner8410da82011-02-11 20:11:10 +01003721 mutex_lock(mdev->state_mutex);
3722 mutex_unlock(mdev->state_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003723 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003724 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3725
3726 if (updated_uuids)
3727 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003728
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003729 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003730}
3731
3732/**
3733 * convert_state() - Converts the peer's view of the cluster state to our point of view
3734 * @ps: The state as seen by the peer.
3735 */
3736static union drbd_state convert_state(union drbd_state ps)
3737{
3738 union drbd_state ms;
3739
3740 static enum drbd_conns c_tab[] = {
Philipp Reisner369bea62011-07-06 23:04:44 +02003741 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
Philipp Reisnerb411b362009-09-25 16:07:19 -07003742 [C_CONNECTED] = C_CONNECTED,
3743
3744 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3745 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3746 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3747 [C_VERIFY_S] = C_VERIFY_T,
3748 [C_MASK] = C_MASK,
3749 };
3750
3751 ms.i = ps.i;
3752
3753 ms.conn = c_tab[ps.conn];
3754 ms.peer = ps.role;
3755 ms.role = ps.peer;
3756 ms.pdsk = ps.disk;
3757 ms.disk = ps.pdsk;
3758 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
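	/* e.g. a peer reporting Primary/Secondary with UpToDate/Inconsistent
	 * disks becomes Secondary/Primary with Inconsistent/UpToDate here,
	 * i.e. the same cluster situation expressed from our point of view. */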
3759
3760 return ms;
3761}
3762
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003763static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003764{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003765 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003766 struct p_req_state *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003767 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003768 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003769
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003770 mdev = vnr_to_mdev(tconn, pi->vnr);
3771 if (!mdev)
3772 return -EIO;
3773
Philipp Reisnerb411b362009-09-25 16:07:19 -07003774 mask.i = be32_to_cpu(p->mask);
3775 val.i = be32_to_cpu(p->val);
3776
Lars Ellenberg427c0432012-08-01 12:43:01 +02003777 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
Philipp Reisner8410da82011-02-11 20:11:10 +01003778 mutex_is_locked(mdev->state_mutex)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003779 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003780 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781 }
3782
3783 mask = convert_state(mask);
3784 val = convert_state(val);
3785
3786 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003787 drbd_send_sr_reply(mdev, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003788
Philipp Reisnerb411b362009-09-25 16:07:19 -07003789 drbd_md_sync(mdev);
3790
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003791 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003792}
3793
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003794static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003795{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003796 struct p_req_state *p = pi->data;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003797 union drbd_state mask, val;
3798 enum drbd_state_rv rv;
3799
3800 mask.i = be32_to_cpu(p->mask);
3801 val.i = be32_to_cpu(p->val);
3802
Lars Ellenberg427c0432012-08-01 12:43:01 +02003803 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003804 mutex_is_locked(&tconn->cstate_mutex)) {
3805 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003806 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003807 }
3808
3809 mask = convert_state(mask);
3810 val = convert_state(val);
3811
Philipp Reisner778bcf22011-03-28 12:55:03 +02003812 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003813 conn_send_sr_reply(tconn, rv);
3814
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003815 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003816}
3817
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003818static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003819{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003820 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003821 struct p_state *p = pi->data;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003822 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003823 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003824 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003825 int rv;
3826
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003827 mdev = vnr_to_mdev(tconn, pi->vnr);
3828 if (!mdev)
3829 return config_unknown_volume(tconn, pi);
3830
Philipp Reisnerb411b362009-09-25 16:07:19 -07003831 peer_state.i = be32_to_cpu(p->state);
3832
3833 real_peer_disk = peer_state.disk;
3834 if (peer_state.disk == D_NEGOTIATING) {
3835 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3836 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3837 }
3838
Philipp Reisner87eeee42011-01-19 14:16:30 +01003839 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003840 retry:
Philipp Reisner78bae592011-03-28 15:40:12 +02003841 os = ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003842 spin_unlock_irq(&mdev->tconn->req_lock);
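	/* We evaluate the peer's state against a snapshot (os) taken under
	 * req_lock; if our own state changes while the lock is dropped, the
	 * check after the second spin_lock_irq() below sends us back to
	 * retry: for a fresh snapshot. */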
Philipp Reisnerb411b362009-09-25 16:07:19 -07003843
Lars Ellenberg545752d2011-12-05 14:39:25 +01003844 /* If some other part of the code (asender thread, timeout)
3845 * already decided to close the connection again,
3846 * we must not "re-establish" it here. */
3847 if (os.conn <= C_TEAR_DOWN)
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003848 return -ECONNRESET;
Lars Ellenberg545752d2011-12-05 14:39:25 +01003849
Lars Ellenberg40424e42011-09-26 15:24:56 +02003850 /* If this is the "end of sync" confirmation, usually the peer disk
3851 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3852 * set) resync started in PausedSyncT, or if the timing of pause-/
3853 * unpause-sync events has been "just right", the peer disk may
3854 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3855 */
3856 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3857 real_peer_disk == D_UP_TO_DATE &&
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003858 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3859 /* If we are (becoming) SyncSource, but peer is still in sync
3860 * preparation, ignore its uptodate-ness to avoid flapping, it
3861 * will change to inconsistent once the peer reaches active
3862 * syncing states.
3863 * It may have changed syncer-paused flags, however, so we
3864 * cannot ignore this completely. */
3865 if (peer_state.conn > C_CONNECTED &&
3866 peer_state.conn < C_SYNC_SOURCE)
3867 real_peer_disk = D_INCONSISTENT;
3868
3869 /* if peer_state changes to connected at the same time,
3870 * it explicitly notifies us that it finished resync.
3871 * Maybe we should finish it up, too? */
3872 else if (os.conn >= C_SYNC_SOURCE &&
3873 peer_state.conn == C_CONNECTED) {
3874 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3875 drbd_resync_finished(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003876 return 0;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003877 }
3878 }
3879
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003880 /* explicit verify finished notification, stop sector reached. */
3881 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3882 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003883 ov_out_of_sync_print(mdev);
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003884 drbd_resync_finished(mdev);
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003885 return 0;
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003886 }
3887
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003888 /* peer says his disk is inconsistent, while we think it is uptodate,
3889 * and this happens while the peer still thinks we have a sync going on,
3890 * but we think we are already done with the sync.
3891 * We ignore this to avoid flapping pdsk.
3892 * This should not happen, if the peer is a recent version of drbd. */
3893 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3894 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3895 real_peer_disk = D_UP_TO_DATE;
3896
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003897 if (ns.conn == C_WF_REPORT_PARAMS)
3898 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003899
Philipp Reisner67531712010-10-27 12:21:30 +02003900 if (peer_state.conn == C_AHEAD)
3901 ns.conn = C_BEHIND;
3902
Philipp Reisnerb411b362009-09-25 16:07:19 -07003903 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3904 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3905 int cr; /* consider resync */
3906
3907 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003908 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003909 /* if we had an established connection
3910 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003911 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003912 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003913 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003914 /* if we have both been inconsistent, and the peer has been
3915 * forced to be UpToDate with --overwrite-data */
3916 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3917 /* if we had been plain connected, and the admin requested to
3918 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003919 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003920 (peer_state.conn >= C_STARTING_SYNC_S &&
3921 peer_state.conn <= C_WF_BITMAP_T));
3922
3923 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003924 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003925
3926 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003927 if (ns.conn == C_MASK) {
3928 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003929 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003930 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003931 } else if (peer_state.disk == D_NEGOTIATING) {
3932 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3933 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003934 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003935 } else {
Philipp Reisner8169e412011-03-15 18:40:27 +01003936 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003937 return -EIO;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003938 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003939 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003940 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003941 }
3942 }
3943 }
3944
Philipp Reisner87eeee42011-01-19 14:16:30 +01003945 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02003946 if (os.i != drbd_read_state(mdev).i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003947 goto retry;
3948 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003949 ns.peer = peer_state.role;
3950 ns.pdsk = real_peer_disk;
3951 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003952 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003953 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003954 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
Philipp Reisner2aebfab2011-03-28 16:48:11 +02003955 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003956 test_bit(NEW_CUR_UUID, &mdev->flags)) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01003957 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
Philipp Reisner481c6f52010-06-22 14:03:27 +02003958 for temporary network outages! */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003959 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003960 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01003961 tl_clear(mdev->tconn);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003962 drbd_uuid_new_current(mdev);
3963 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003964 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003965 return -EIO;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003966 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003967 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisner78bae592011-03-28 15:40:12 +02003968 ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003969 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003970
3971 if (rv < SS_SUCCESS) {
Philipp Reisner38fa9982011-03-15 18:24:49 +01003972 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003973 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003974 }
3975
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003976 if (os.conn > C_WF_REPORT_PARAMS) {
3977 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003978 peer_state.disk != D_NEGOTIATING ) {
3979 /* we want resync, peer has not yet decided to sync... */
3980 /* Nowadays only used when forcing a node into primary role and
3981 setting its disk to UpToDate with that */
3982 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02003983 drbd_send_current_state(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003984 }
3985 }
3986
Philipp Reisner08b165b2011-09-05 16:22:33 +02003987 clear_bit(DISCARD_MY_DATA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003988
Lars Ellenbergcccac982013-03-19 18:16:46 +01003989 drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003990
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003991 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003992}
3993
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003994static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003995{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003996 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003997 struct p_rs_uuid *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003998
3999 mdev = vnr_to_mdev(tconn, pi->vnr);
4000 if (!mdev)
4001 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004002
4003 wait_event(mdev->misc_wait,
4004 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004005 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07004006 mdev->state.conn < C_CONNECTED ||
4007 mdev->state.disk < D_NEGOTIATING);
4008
4009 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4010
Philipp Reisnerb411b362009-09-25 16:07:19 -07004011 /* Here the _drbd_uuid_ functions are right, current should
4012 _not_ be rotated into the history */
4013 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4014 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4015 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4016
Lars Ellenberg62b0da32011-01-20 13:25:21 +01004017 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004018 drbd_start_resync(mdev, C_SYNC_TARGET);
4019
4020 put_ldev(mdev);
4021 } else
4022 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4023
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004024 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004025}
4026
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004027/**
4028 * receive_bitmap_plain
4029 *
4030 * Return 0 when done, 1 when another iteration is needed, and a negative error
4031 * code upon failure.
4032 */
4033static int
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004034receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004035 unsigned long *p, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004036{
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004037 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4038 drbd_header_size(mdev->tconn);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004039 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004040 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004041 unsigned int want = num_words * sizeof(*p);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004042 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004043
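	/* Plain bitmap packets are sized to fill the socket buffer: each one
	 * carries up to (DRBD_SOCKET_BUFFER_SIZE - header size) / sizeof(long)
	 * words; only the last packet of a transfer is shorter, carrying
	 * whatever remains of c->bm_words. */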
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004044 if (want != size) {
4045 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004046 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004047 }
4048 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004049 return 0;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004050 err = drbd_recv_all(mdev->tconn, p, want);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004051 if (err)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004052 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004053
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004054 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004055
4056 c->word_offset += num_words;
4057 c->bit_offset = c->word_offset * BITS_PER_LONG;
4058 if (c->bit_offset > c->bm_bits)
4059 c->bit_offset = c->bm_bits;
4060
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004061 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004062}
4063
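/* Accessors for the header byte p->encoding of a compressed bitmap packet.
 * Its layout, as implied by the masks below:
 *   bits 3..0  bitmap encoding code (enum drbd_bitmap_code, e.g. RLE_VLI_Bits)
 *   bits 6..4  number of padding bits at the end of the bit stream
 *   bit  7     value (set/clear) of the first run
 */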
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004064static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4065{
4066 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4067}
4068
4069static int dcbp_get_start(struct p_compressed_bm *p)
4070{
4071 return (p->encoding & 0x80) != 0;
4072}
4073
4074static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4075{
4076 return (p->encoding >> 4) & 0x7;
4077}
4078
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004079/**
4080 * recv_bm_rle_bits
4081 *
4082 * Return 0 when done, 1 when another iteration is needed, and a negative error
4083 * code upon failure.
4084 */
4085static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07004086recv_bm_rle_bits(struct drbd_conf *mdev,
4087 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01004088 struct bm_xfer_ctx *c,
4089 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004090{
4091 struct bitstream bs;
4092 u64 look_ahead;
4093 u64 rl;
4094 u64 tmp;
4095 unsigned long s = c->bit_offset;
4096 unsigned long e;
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004097 int toggle = dcbp_get_start(p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004098 int have;
4099 int bits;
4100
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004101 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
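	/* The payload is a sequence of VLI-encoded run lengths, describing
	 * alternating runs of clear and set bits; dcbp_get_start() tells us
	 * whether the first run is a run of set bits.  Illustrative example:
	 * start = 0 with runs 5, 3, 2 leaves bits 0..4 clear, sets bits 5..7,
	 * and leaves bits 8..9 clear. */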
Philipp Reisnerb411b362009-09-25 16:07:19 -07004102
4103 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4104 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004105 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004106
4107 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4108 bits = vli_decode_bits(&rl, look_ahead);
4109 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004110 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004111
4112 if (toggle) {
4113 e = s + rl -1;
4114 if (e >= c->bm_bits) {
4115 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004116 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117 }
4118 _drbd_bm_set_bits(mdev, s, e);
4119 }
4120
4121 if (have < bits) {
4122 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4123 have, bits, look_ahead,
4124 (unsigned int)(bs.cur.b - p->code),
4125 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004126 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004127 }
Lars Ellenbergd2da5b02013-10-23 10:59:18 +02004128 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4129 if (likely(bits < 64))
4130 look_ahead >>= bits;
4131 else
4132 look_ahead = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004133 have -= bits;
4134
4135 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4136 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004137 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004138 look_ahead |= tmp << have;
4139 have += bits;
4140 }
4141
4142 c->bit_offset = s;
4143 bm_xfer_ctx_bit_to_word_offset(c);
4144
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004145 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004146}
4147
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004148/**
4149 * decode_bitmap_c
4150 *
4151 * Return 0 when done, 1 when another iteration is needed, and a negative error
4152 * code upon failure.
4153 */
4154static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07004155decode_bitmap_c(struct drbd_conf *mdev,
4156 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01004157 struct bm_xfer_ctx *c,
4158 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004159{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004160 if (dcbp_get_code(p) == RLE_VLI_Bits)
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004161 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004162
4163 /* other variants had been implemented for evaluation,
4164 * but have been dropped as this one turned out to be "best"
4165 * during all our tests. */
4166
4167 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
Philipp Reisner38fa9982011-03-15 18:24:49 +01004168 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004169 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004170}
4171
4172void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4173 const char *direction, struct bm_xfer_ctx *c)
4174{
4175 /* what would it take to transfer it "plaintext" */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004176 unsigned int header_size = drbd_header_size(mdev->tconn);
4177 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4178 unsigned int plain =
4179 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4180 c->bm_words * sizeof(unsigned long);
4181 unsigned int total = c->bytes[0] + c->bytes[1];
4182 unsigned int r;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004183
4184 /* total can not be zero. but just in case: */
4185 if (total == 0)
4186 return;
4187
4188 /* don't report if not compressed */
4189 if (total >= plain)
4190 return;
4191
4192 /* total < plain. check for overflow, still */
4193 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4194 : (1000 * total / plain);
4195
4196 if (r > 1000)
4197 r = 1000;
4198
4199 r = 1000 - r;
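	/* r is now the savings in per mille, e.g. plain = 100000 bytes and
	 * total = 25000 bytes gives r = 1000 - 250 = 750, reported below as
	 * "compression: 75.0%". */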
4200 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4201 "total %u; compression: %u.%u%%\n",
4202 direction,
4203 c->bytes[1], c->packets[1],
4204 c->bytes[0], c->packets[0],
4205 total, r/10, r % 10);
4206}
4207
4208/* Since we are processing the bitfield from lower addresses to higher,
4209 it does not matter whether we process it in 32 bit or 64 bit
4210 chunks, as long as it is little endian. (Understand it as a byte
4211 stream, beginning with the lowest byte...) If we used big endian,
4212 we would need to process it from the highest address to the lowest,
4213 in order to be agnostic to the 32 vs 64 bit issue.
4214
4215 Returns 0 on success, a negative error code otherwise. */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004216static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004217{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004218 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004219 struct bm_xfer_ctx c;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004220 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004221
4222 mdev = vnr_to_mdev(tconn, pi->vnr);
4223 if (!mdev)
4224 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004225
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004226 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4227 /* you are supposed to send additional out-of-sync information
4228 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004229
Philipp Reisnerb411b362009-09-25 16:07:19 -07004230 c = (struct bm_xfer_ctx) {
4231 .bm_bits = drbd_bm_bits(mdev),
4232 .bm_words = drbd_bm_words(mdev),
4233 };
4234
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004235 for(;;) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004236 if (pi->cmd == P_BITMAP)
4237 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4238 else if (pi->cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004239 /* MAYBE: sanity check that we speak proto >= 90,
4240 * and the feature is enabled! */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004241 struct p_compressed_bm *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004242
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004243 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004244 dev_err(DEV, "ReportCBitmap packet too large\n");
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004245 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004246 goto out;
4247 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004248 if (pi->size <= sizeof(*p)) {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004249 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004250 err = -EIO;
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01004251 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004252 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004253 err = drbd_recv_all(mdev->tconn, p, pi->size);
4254 if (err)
4255 goto out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004256 err = decode_bitmap_c(mdev, p, &c, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004257 } else {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004258 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004259 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004260 goto out;
4261 }
4262
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004263 c.packets[pi->cmd == P_BITMAP]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004264 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004265
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004266 if (err <= 0) {
4267 if (err < 0)
4268 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004269 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004270 }
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004271 err = drbd_recv_header(mdev->tconn, pi);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004272 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004273 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004274 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004275
4276 INFO_bm_xfer_stats(mdev, "receive", &c);
4277
4278 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01004279 enum drbd_state_rv rv;
4280
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004281 err = drbd_send_bitmap(mdev);
4282 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004283 goto out;
4284 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01004285 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4286 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004287 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4288 /* admin may have requested C_DISCONNECTING,
4289 * other threads may have noticed network errors */
4290 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4291 drbd_conn_str(mdev->state.conn));
4292 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004293 err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004294
Philipp Reisnerb411b362009-09-25 16:07:19 -07004295 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004296 drbd_bm_unlock(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004297 if (!err && mdev->state.conn == C_WF_BITMAP_S)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004298 drbd_start_resync(mdev, C_SYNC_SOURCE);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004299 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004300}
4301
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004302static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004303{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004304 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004305 pi->cmd, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004306
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004307 return ignore_remaining_packet(tconn, pi);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004308}
4309
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004310static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004311{
Philipp Reisnerb411b362009-09-25 16:07:19 -07004312 /* Make sure we've acked all the TCP data associated
4313 * with the data requests being unplugged */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004314 drbd_tcp_quickack(tconn->data.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004315
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004316 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004317}
4318
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004319static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner73a01a12010-10-27 14:33:00 +02004320{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004321 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004322 struct p_block_desc *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004323
4324 mdev = vnr_to_mdev(tconn, pi->vnr);
4325 if (!mdev)
4326 return -EIO;
Philipp Reisner73a01a12010-10-27 14:33:00 +02004327
Lars Ellenbergf735e362010-12-17 21:06:18 +01004328 switch (mdev->state.conn) {
4329 case C_WF_SYNC_UUID:
4330 case C_WF_BITMAP_T:
4331 case C_BEHIND:
4332 break;
4333 default:
4334 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4335 drbd_conn_str(mdev->state.conn));
4336 }
4337
Philipp Reisner73a01a12010-10-27 14:33:00 +02004338 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4339
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004340 return 0;
Philipp Reisner73a01a12010-10-27 14:33:00 +02004341}
4342
Philipp Reisner02918be2010-08-20 14:35:10 +02004343struct data_cmd {
4344 int expect_payload;
4345 size_t pkt_size;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004346 int (*fn)(struct drbd_tconn *, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004347};
4348
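/* Dispatch table for the main receiver loop (drbdd) below: .pkt_size is the
 * fixed part of the packet that is read before .fn is called, and
 * .expect_payload says whether additional payload may follow on the wire,
 * which the handler then reads itself (e.g. bitmap and sync-param packets). */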
Philipp Reisner02918be2010-08-20 14:35:10 +02004349static struct data_cmd drbd_cmd_handler[] = {
4350 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4351 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4352 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4353 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004354 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4355 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4356 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
Philipp Reisner02918be2010-08-20 14:35:10 +02004357 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4358 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004359 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4360 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
Philipp Reisner02918be2010-08-20 14:35:10 +02004361 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4362 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4363 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4364 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4365 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4366 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4367 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4368 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4369 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4370 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02004371 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004372 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
Philipp Reisner036b17e2011-05-16 17:38:11 +02004373 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
Philipp Reisner02918be2010-08-20 14:35:10 +02004374};
4375
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004376static void drbdd(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004377{
Philipp Reisner77351055b2011-02-07 17:24:26 +01004378 struct packet_info pi;
Philipp Reisner02918be2010-08-20 14:35:10 +02004379 size_t shs; /* sub header size */
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004380 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004381
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004382 while (get_t_state(&tconn->receiver) == RUNNING) {
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004383 struct data_cmd *cmd;
4384
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004385 drbd_thread_current_set_cpu(&tconn->receiver);
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004386 if (drbd_recv_header(tconn, &pi))
Philipp Reisner02918be2010-08-20 14:35:10 +02004387 goto err_out;
4388
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004389 cmd = &drbd_cmd_handler[pi.cmd];
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004390 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004391 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4392 cmdname(pi.cmd), pi.cmd);
Philipp Reisner02918be2010-08-20 14:35:10 +02004393 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01004394 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004395
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004396 shs = cmd->pkt_size;
4397 if (pi.size > shs && !cmd->expect_payload) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004398 conn_err(tconn, "No payload expected %s l:%d\n",
4399 cmdname(pi.cmd), pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004400 goto err_out;
4401 }
4402
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004403 if (shs) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004404 err = drbd_recv_all_warn(tconn, pi.data, shs);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004405 if (err)
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004406 goto err_out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004407 pi.size -= shs;
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004408 }
4409
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004410 err = cmd->fn(tconn, &pi);
4411 if (err) {
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004412 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4413 cmdname(pi.cmd), err, pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004414 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004415 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004416 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004417 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004418
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004419 err_out:
4420 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004421}
4422
Philipp Reisner0e29d162011-02-18 14:23:11 +01004423void conn_flush_workqueue(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004424{
4425 struct drbd_wq_barrier barr;
4426
4427 barr.w.cb = w_prev_work_done;
Philipp Reisner0e29d162011-02-18 14:23:11 +01004428 barr.w.tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004429 init_completion(&barr.done);
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01004430 drbd_queue_work(&tconn->sender_work, &barr.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004431 wait_for_completion(&barr.done);
4432}
4433
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004434static void conn_disconnect(struct drbd_tconn *tconn)
Philipp Reisnerf70b3512010-06-24 14:34:40 +02004435{
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004436 struct drbd_conf *mdev;
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004437 enum drbd_conns oc;
Philipp Reisner376694a2011-11-07 10:54:28 +01004438 int vnr;
Philipp Reisnerf70b3512010-06-24 14:34:40 +02004439
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004440 if (tconn->cstate == C_STANDALONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004441 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004442
Lars Ellenberg545752d2011-12-05 14:39:25 +01004443 /* We are about to start the cleanup after connection loss.
4444 * Make sure drbd_make_request knows about that.
4445 * Usually we should be in some network failure state already,
4446 * but just in case we are not, we fix it up here.
4447 */
Philipp Reisnerb8853db2011-12-13 11:09:16 +01004448 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Lars Ellenberg545752d2011-12-05 14:39:25 +01004449
Philipp Reisnerb411b362009-09-25 16:07:19 -07004450 /* asender does not clean up anything. it must not interfere, either */
Philipp Reisner360cc742011-02-08 14:29:53 +01004451 drbd_thread_stop(&tconn->asender);
4452 drbd_free_sock(tconn);
4453
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004454 rcu_read_lock();
4455 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4456 kref_get(&mdev->kref);
4457 rcu_read_unlock();
4458 drbd_disconnected(mdev);
4459 kref_put(&mdev->kref, &drbd_minor_destroy);
4460 rcu_read_lock();
4461 }
4462 rcu_read_unlock();
4463
Philipp Reisner12038a32011-11-09 19:18:00 +01004464 if (!list_empty(&tconn->current_epoch->list))
4465 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4466 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4467 atomic_set(&tconn->current_epoch->epoch_size, 0);
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01004468 tconn->send.seen_any_write_yet = false;
Philipp Reisner12038a32011-11-09 19:18:00 +01004469
Philipp Reisner360cc742011-02-08 14:29:53 +01004470 conn_info(tconn, "Connection closed\n");
4471
Philipp Reisnercb703452011-03-24 11:03:07 +01004472 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4473 conn_try_outdate_peer_async(tconn);
4474
Philipp Reisner360cc742011-02-08 14:29:53 +01004475 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004476 oc = tconn->cstate;
4477 if (oc >= C_UNCONNECTED)
Philipp Reisner376694a2011-11-07 10:54:28 +01004478 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004479
Philipp Reisner360cc742011-02-08 14:29:53 +01004480 spin_unlock_irq(&tconn->req_lock);
4481
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02004482 if (oc == C_DISCONNECTING)
Lars Ellenbergd9cc6e22011-04-27 10:25:28 +02004483 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
Philipp Reisner360cc742011-02-08 14:29:53 +01004484}
4485
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004486static int drbd_disconnected(struct drbd_conf *mdev)
Philipp Reisner360cc742011-02-08 14:29:53 +01004487{
Philipp Reisner360cc742011-02-08 14:29:53 +01004488 unsigned int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004489
Philipp Reisner85719572010-07-21 10:20:17 +02004490 /* wait for current activity to cease. */
Philipp Reisner87eeee42011-01-19 14:16:30 +01004491 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004492 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4493 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4494 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004495 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004496
4497 /* We do not have data structures that would allow us to
4498 * get the rs_pending_cnt down to 0 again.
4499 * * On C_SYNC_TARGET we do not have any data structures describing
4500 * the pending RSDataRequests we have sent.
4501 * * On C_SYNC_SOURCE there is no data structure that tracks
4502 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4503 * And no, it is not the sum of the reference counts in the
4504 * resync_LRU. The resync_LRU tracks the whole operation including
4505 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4506 * on the fly. */
4507 drbd_rs_cancel_all(mdev);
4508 mdev->rs_total = 0;
4509 mdev->rs_failed = 0;
4510 atomic_set(&mdev->rs_pending_cnt, 0);
4511 wake_up(&mdev->misc_wait);
4512
Philipp Reisnerb411b362009-09-25 16:07:19 -07004513 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004514 resync_timer_fn((unsigned long)mdev);
4515
Philipp Reisnerb411b362009-09-25 16:07:19 -07004516 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4517 * w_make_resync_request etc. which may still be on the worker queue
4518 * to be "canceled" */
4519 drbd_flush_workqueue(mdev);
4520
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004521 drbd_finish_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004522
Philipp Reisnerd10b4ea2011-11-30 23:25:36 +01004523 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4524 might have queued work again. The one before drbd_finish_peer_reqs() is
4525 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4526 drbd_flush_workqueue(mdev);
4527
Lars Ellenberg08332d72012-08-17 15:09:13 +02004528 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4529 * again via drbd_try_clear_on_disk_bm(). */
4530 drbd_rs_cancel_all(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004531
4532 kfree(mdev->p_uuid);
4533 mdev->p_uuid = NULL;
4534
Philipp Reisner2aebfab2011-03-28 16:48:11 +02004535 if (!drbd_suspended(mdev))
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004536 tl_clear(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004537
4538 drbd_md_sync(mdev);
4539
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004540 /* serialize with bitmap writeout triggered by the state change,
4541 * if any. */
4542 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4543
Philipp Reisnerb411b362009-09-25 16:07:19 -07004544 /* tcp_close and release of sendpage pages can be deferred. I don't
4545 * want to use SO_LINGER, because apparently it can be deferred for
4546 * more than 20 seconds (longest time I checked).
4547 *
4548 * Actually we don't care for exactly when the network stack does its
4549 * put_page(), but release our reference on these pages right here.
4550 */
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02004551 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004552 if (i)
4553 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02004554 i = atomic_read(&mdev->pp_in_use_by_net);
4555 if (i)
4556 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004557 i = atomic_read(&mdev->pp_in_use);
4558 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02004559 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004560
4561 D_ASSERT(list_empty(&mdev->read_ee));
4562 D_ASSERT(list_empty(&mdev->active_ee));
4563 D_ASSERT(list_empty(&mdev->sync_ee));
4564 D_ASSERT(list_empty(&mdev->done_ee));
4565
Philipp Reisner360cc742011-02-08 14:29:53 +01004566 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004567}
4568
4569/*
4570 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4571 * we can agree on is stored in agreed_pro_version.
4572 *
4573 * feature flags and the reserved array should be enough room for future
4574 * enhancements of the handshake protocol, and possible plugins...
4575 *
4576 * for now, they are expected to be zero, but ignored.
4577 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004578static int drbd_send_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004579{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004580 struct drbd_socket *sock;
4581 struct p_connection_features *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004582
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004583 sock = &tconn->data;
4584 p = conn_prepare_command(tconn, sock);
4585 if (!p)
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004586 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004587 memset(p, 0, sizeof(*p));
4588 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4589 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004590 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004591}
4592
4593/*
4594 * return values:
4595 * 1 yes, we have a valid connection
4596 * 0 oops, did not work out, please try again
4597 * -1 peer talks different language,
4598 * no point in trying again, please go standalone.
4599 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004600static int drbd_do_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004601{
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004602 /* ASSERT current == tconn->receiver ... */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004603 struct p_connection_features *p;
4604 const int expect = sizeof(struct p_connection_features);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004605 struct packet_info pi;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004606 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004607
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004608 err = drbd_send_features(tconn);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004609 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004610 return 0;
4611
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004612 err = drbd_recv_header(tconn, &pi);
4613 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004614 return 0;
4615
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004616 if (pi.cmd != P_CONNECTION_FEATURES) {
4617 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004618 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004619 return -1;
4620 }
4621
Philipp Reisner77351055b2011-02-07 17:24:26 +01004622 if (pi.size != expect) {
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004623 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004624 expect, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004625 return -1;
4626 }
4627
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004628 p = pi.data;
4629 err = drbd_recv_all_warn(tconn, p, expect);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004630 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004631 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004632
Philipp Reisnerb411b362009-09-25 16:07:19 -07004633 p->protocol_min = be32_to_cpu(p->protocol_min);
4634 p->protocol_max = be32_to_cpu(p->protocol_max);
4635 if (p->protocol_max == 0)
4636 p->protocol_max = p->protocol_min;
4637
4638 if (PRO_VERSION_MAX < p->protocol_min ||
4639 PRO_VERSION_MIN > p->protocol_max)
4640 goto incompat;
4641
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004642 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004643
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004644 conn_info(tconn, "Handshake successful: "
4645 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004646
4647 return 1;
4648
4649 incompat:
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004650 conn_err(tconn, "incompatible DRBD dialects: "
Philipp Reisnerb411b362009-09-25 16:07:19 -07004651 "I support %d-%d, peer supports %d-%d\n",
4652 PRO_VERSION_MIN, PRO_VERSION_MAX,
4653 p->protocol_min, p->protocol_max);
4654 return -1;
4655}
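/*
 * Illustrative example of the negotiation above (not part of the driver;
 * the version numbers are hypothetical): if we support protocol versions
 * 86..101 and the peer advertises 95..110, the ranges overlap and
 *
 *	agreed_pro_version = min(PRO_VERSION_MAX, peer protocol_max)
 *	                   = min(101, 110) = 101.
 *
 * If the peer advertised 110..120 instead, PRO_VERSION_MAX would lie below
 * the peer's minimum and drbd_do_features() would return -1 ("incompatible
 * DRBD dialects").
 */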
4656
4657#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
Philipp Reisner13e60372011-02-08 09:54:40 +01004658static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004659{
Philipp Reisneref57f9e2013-03-27 14:08:44 +01004660 conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4661 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004662 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004663}
4664#else
4665#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004666
4667/* Return value:
4668 1 - auth succeeded,
4669 0 - failed, try again (network error),
4670 -1 - auth failed, don't try again.
4671*/
4672
Philipp Reisner13e60372011-02-08 09:54:40 +01004673static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004674{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004675 struct drbd_socket *sock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004676 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4677 struct scatterlist sg;
4678 char *response = NULL;
4679 char *right_response = NULL;
4680 char *peers_ch = NULL;
Philipp Reisner44ed1672011-04-19 17:10:19 +02004681 unsigned int key_len;
4682 char secret[SHARED_SECRET_MAX]; /* 64 byte */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004683 unsigned int resp_size;
4684 struct hash_desc desc;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004685 struct packet_info pi;
Philipp Reisner44ed1672011-04-19 17:10:19 +02004686 struct net_conf *nc;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004687 int err, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004688
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004689 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
4690
Philipp Reisner44ed1672011-04-19 17:10:19 +02004691 rcu_read_lock();
4692 nc = rcu_dereference(tconn->net_conf);
4693 key_len = strlen(nc->shared_secret);
4694 memcpy(secret, nc->shared_secret, key_len);
4695 rcu_read_unlock();
4696
Philipp Reisner13e60372011-02-08 09:54:40 +01004697 desc.tfm = tconn->cram_hmac_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004698 desc.flags = 0;
4699
Philipp Reisner44ed1672011-04-19 17:10:19 +02004700 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004701 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004702 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004703 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004704 goto fail;
4705 }
4706
4707 get_random_bytes(my_challenge, CHALLENGE_LEN);
4708
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004709 sock = &tconn->data;
4710 if (!conn_prepare_command(tconn, sock)) {
4711 rv = 0;
4712 goto fail;
4713 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004714 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004715 my_challenge, CHALLENGE_LEN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004716 if (!rv)
4717 goto fail;
4718
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004719 err = drbd_recv_header(tconn, &pi);
4720 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004721 rv = 0;
4722 goto fail;
4723 }
4724
Philipp Reisner77351055b2011-02-07 17:24:26 +01004725 if (pi.cmd != P_AUTH_CHALLENGE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004726 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004727 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004728 rv = 0;
4729 goto fail;
4730 }
4731
Philipp Reisner77351055b2011-02-07 17:24:26 +01004732 if (pi.size > CHALLENGE_LEN * 2) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004733 conn_err(tconn, "AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004734 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004735 goto fail;
4736 }
4737
Philipp Reisner77351055b2011-02-07 17:24:26 +01004738 peers_ch = kmalloc(pi.size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004739 if (peers_ch == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004740 conn_err(tconn, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004741 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004742 goto fail;
4743 }
4744
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004745 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4746 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004747 rv = 0;
4748 goto fail;
4749 }
4750
Philipp Reisner13e60372011-02-08 09:54:40 +01004751 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004752 response = kmalloc(resp_size, GFP_NOIO);
4753 if (response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004754 conn_err(tconn, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004755 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004756 goto fail;
4757 }
4758
4759 sg_init_table(&sg, 1);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004760 sg_set_buf(&sg, peers_ch, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004761
4762 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4763 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004764 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004765 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004766 goto fail;
4767 }
4768
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004769 if (!conn_prepare_command(tconn, sock)) {
4770 rv = 0;
4771 goto fail;
4772 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004773 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004774 response, resp_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004775 if (!rv)
4776 goto fail;
4777
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004778 err = drbd_recv_header(tconn, &pi);
4779 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004780 rv = 0;
4781 goto fail;
4782 }
4783
Philipp Reisner77351055b2011-02-07 17:24:26 +01004784 if (pi.cmd != P_AUTH_RESPONSE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004785 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004786 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004787 rv = 0;
4788 goto fail;
4789 }
4790
Philipp Reisner77351055b2011-02-07 17:24:26 +01004791 if (pi.size != resp_size) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004792 conn_err(tconn, "AuthResponse payload of wrong size\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004793 rv = 0;
4794 goto fail;
4795 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004796
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004797 err = drbd_recv_all_warn(tconn, response, resp_size);
4798 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004799 rv = 0;
4800 goto fail;
4801 }
4802
4803 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004804 if (right_response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004805 conn_err(tconn, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004806 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004807 goto fail;
4808 }
4809
4810 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4811
4812 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4813 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004814 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004815 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004816 goto fail;
4817 }
4818
4819 rv = !memcmp(response, right_response, resp_size);
4820
4821 if (rv)
Philipp Reisner44ed1672011-04-19 17:10:19 +02004822 conn_info(tconn, "Peer authenticated using %d bytes of HMAC\n",
4823 resp_size);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004824 else
4825 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004826
4827 fail:
4828 kfree(peers_ch);
4829 kfree(response);
4830 kfree(right_response);
4831
4832 return rv;
4833}
4834#endif
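/*
 * Summary of the CRAM-HMAC exchange implemented by drbd_do_auth() above
 * (both peers run the same sequence):
 *
 *	1. send P_AUTH_CHALLENGE carrying a 64 byte random challenge
 *	2. receive the peer's P_AUTH_CHALLENGE
 *	3. send P_AUTH_RESPONSE = HMAC(shared secret, peer's challenge)
 *	4. receive the peer's P_AUTH_RESPONSE and compare it against
 *	   HMAC(shared secret, our own challenge)
 *
 * Authentication succeeds only if the received response matches the locally
 * computed one, i.e. only if both sides were configured with the same
 * shared secret and HMAC algorithm.
 */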
4835
4836int drbdd_init(struct drbd_thread *thi)
4837{
Philipp Reisner392c8802011-02-09 10:33:31 +01004838 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004839 int h;
4840
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004841 conn_info(tconn, "receiver (re)started\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004842
4843 do {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004844 h = conn_connect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004845 if (h == 0) {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004846 conn_disconnect(tconn);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004847 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004848 }
4849 if (h == -1) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004850 conn_warn(tconn, "Discarding network configuration.\n");
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004851 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004852 }
4853 } while (h == 0);
4854
Philipp Reisner91fd4da2011-04-20 17:47:29 +02004855 if (h > 0)
4856 drbdd(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004857
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004858 conn_disconnect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004859
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004860 conn_info(tconn, "receiver terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004861 return 0;
4862}
4863
4864/* ********* acknowledge sender ******** */
4865
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004866static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004867{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004868 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004869 int retcode = be32_to_cpu(p->retcode);
4870
4871 if (retcode >= SS_SUCCESS) {
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004872 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004873 } else {
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004874 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4875 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4876 drbd_set_st_err_str(retcode), retcode);
4877 }
4878 wake_up(&tconn->ping_wait);
4879
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004880 return 0;
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004881}
4882
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004883static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004884{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004885 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004886 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004887 int retcode = be32_to_cpu(p->retcode);
4888
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004889 mdev = vnr_to_mdev(tconn, pi->vnr);
4890 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004891 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004892
Philipp Reisner4d0fc3f2012-01-20 13:52:27 +01004893 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4894 D_ASSERT(tconn->agreed_pro_version < 100);
4895 return got_conn_RqSReply(tconn, pi);
4896 }
4897
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004898 if (retcode >= SS_SUCCESS) {
4899 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4900 } else {
4901 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004902 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004903 drbd_set_st_err_str(retcode), retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004904 }
4905 wake_up(&mdev->state_wait);
4906
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004907 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004908}
4909
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004910static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004911{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004912 return drbd_send_ping_ack(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004913
4914}
4915
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004916static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004917{
4918 /* restore idle timeout */
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01004919 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4920 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4921 wake_up(&tconn->ping_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004922
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004923 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004924}
4925
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004926static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004927{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004928 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004929 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004930 sector_t sector = be64_to_cpu(p->sector);
4931 int blksize = be32_to_cpu(p->blksize);
4932
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004933 mdev = vnr_to_mdev(tconn, pi->vnr);
4934 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004935 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004936
Philipp Reisner31890f42011-01-19 14:12:51 +01004937 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004938
4939 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4940
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004941 if (get_ldev(mdev)) {
4942 drbd_rs_complete_io(mdev, sector);
4943 drbd_set_in_sync(mdev, sector, blksize);
4944 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4945 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4946 put_ldev(mdev);
4947 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004948 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004949 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004950
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004951 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004952}
4953
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004954static int
4955validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4956 struct rb_root *root, const char *func,
4957 enum drbd_req_event what, bool missing_ok)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004958{
4959 struct drbd_request *req;
4960 struct bio_and_error m;
4961
Philipp Reisner87eeee42011-01-19 14:16:30 +01004962 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004963 req = find_request(mdev, root, id, sector, missing_ok, func);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004964 if (unlikely(!req)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01004965 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004966 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004967 }
4968 __req_mod(req, what, &m);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004969 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004970
4971 if (m.bio)
4972 complete_master_bio(mdev, &m);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004973 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004974}
4975
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004976static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004977{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004978 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004979 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004980 sector_t sector = be64_to_cpu(p->sector);
4981 int blksize = be32_to_cpu(p->blksize);
4982 enum drbd_req_event what;
4983
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004984 mdev = vnr_to_mdev(tconn, pi->vnr);
4985 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004986 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004987
Philipp Reisnerb411b362009-09-25 16:07:19 -07004988 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4989
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004990 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004991 drbd_set_in_sync(mdev, sector, blksize);
4992 dec_rs_pending(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004993 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004994 }
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004995 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004996 case P_RS_WRITE_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004997 what = WRITE_ACKED_BY_PEER_AND_SIS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004998 break;
4999 case P_WRITE_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005000 what = WRITE_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005001 break;
5002 case P_RECV_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005003 what = RECV_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005004 break;
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02005005 case P_SUPERSEDED:
5006 what = CONFLICT_RESOLVED;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005007 break;
5008 case P_RETRY_WRITE:
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005009 what = POSTPONE_WRITE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005010 break;
5011 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005012 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07005013 }
5014
5015 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005016 &mdev->write_requests, __func__,
5017 what, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005018}
5019
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005020static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005021{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005022 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005023 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005024 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01005025 int size = be32_to_cpu(p->blksize);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005026 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005027
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005028 mdev = vnr_to_mdev(tconn, pi->vnr);
5029 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005030 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005031
5032 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5033
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01005034 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07005035 dec_rs_pending(mdev);
5036 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005037 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005038 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01005039
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005040 err = validate_req_change_req_state(mdev, p->block_id, sector,
5041 &mdev->write_requests, __func__,
Philipp Reisner303d1442011-04-13 16:24:47 -07005042 NEG_ACKED, true);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005043 if (err) {
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01005044 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5045 The master bio might already be completed, therefore the
5046 request is no longer in the collision hash. */
5047 /* In Protocol B we might already have got a P_RECV_ACK
5048 but then get a P_NEG_ACK afterwards. */
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01005049 drbd_set_out_of_sync(mdev, sector, size);
Philipp Reisner2deb8332011-01-17 18:39:18 +01005050 }
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005051 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005052}
5053
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005054static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005055{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005056 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005057 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005058 sector_t sector = be64_to_cpu(p->sector);
5059
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005060 mdev = vnr_to_mdev(tconn, pi->vnr);
5061 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005062 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005063
Philipp Reisnerb411b362009-09-25 16:07:19 -07005064 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005065
Philipp Reisner380207d2011-11-11 12:31:20 +01005066 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07005067 (unsigned long long)sector, be32_to_cpu(p->blksize));
5068
5069 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005070 &mdev->read_requests, __func__,
5071 NEG_ACKED, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005072}
5073
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005074static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005075{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005076 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005077 sector_t sector;
5078 int size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005079 struct p_block_ack *p = pi->data;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005080
5081 mdev = vnr_to_mdev(tconn, pi->vnr);
5082 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005083 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005084
5085 sector = be64_to_cpu(p->sector);
5086 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005087
5088 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5089
5090 dec_rs_pending(mdev);
5091
5092 if (get_ldev_if_state(mdev, D_FAILED)) {
5093 drbd_rs_complete_io(mdev, sector);
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01005094 switch (pi->cmd) {
Philipp Reisnerd612d302010-12-27 10:53:28 +01005095 case P_NEG_RS_DREPLY:
5096 drbd_rs_failed_io(mdev, sector, size);
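			/* fall through */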
5097 case P_RS_CANCEL:
5098 break;
5099 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005100 BUG();
Philipp Reisnerd612d302010-12-27 10:53:28 +01005101 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005102 put_ldev(mdev);
5103 }
5104
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005105 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005106}
5107
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005108static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005109{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005110 struct p_barrier_ack *p = pi->data;
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005111 struct drbd_conf *mdev;
5112 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005113
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005114 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
Philipp Reisnerb411b362009-09-25 16:07:19 -07005115
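	/* Devices that went Ahead of the peer and have no application requests
	 * in flight can now arm the timer that switches them back to resync
	 * (SyncSource). */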
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005116 rcu_read_lock();
5117 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5118 if (mdev->state.conn == C_AHEAD &&
5119 atomic_read(&mdev->ap_in_flight) == 0 &&
5120 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5121 mdev->start_resync_timer.expires = jiffies + HZ;
5122 add_timer(&mdev->start_resync_timer);
5123 }
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02005124 }
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005125 rcu_read_unlock();
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02005126
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005127 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005128}
5129
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005130static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005131{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005132 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005133 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005134 struct drbd_work *w;
5135 sector_t sector;
5136 int size;
5137
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005138 mdev = vnr_to_mdev(tconn, pi->vnr);
5139 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005140 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005141
Philipp Reisnerb411b362009-09-25 16:07:19 -07005142 sector = be64_to_cpu(p->sector);
5143 size = be32_to_cpu(p->blksize);
5144
5145 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5146
5147 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005148 drbd_ov_out_of_sync_found(mdev, sector, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005149 else
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005150 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005151
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005152 if (!get_ldev(mdev))
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005153 return 0;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005154
Philipp Reisnerb411b362009-09-25 16:07:19 -07005155 drbd_rs_complete_io(mdev, sector);
5156 dec_rs_pending(mdev);
5157
Lars Ellenbergea5442a2010-11-05 09:48:01 +01005158 --mdev->ov_left;
5159
5160 /* let's advance progress step marks only for every other megabyte */
5161 if ((mdev->ov_left & 0x200) == 0x200)
5162 drbd_advance_rs_marks(mdev, mdev->ov_left);
5163
5164 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07005165 w = kmalloc(sizeof(*w), GFP_NOIO);
5166 if (w) {
5167 w->cb = w_ov_finished;
Philipp Reisnera21e9292011-02-08 15:08:49 +01005168 w->mdev = mdev;
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01005169 drbd_queue_work(&mdev->tconn->sender_work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005170 } else {
5171 dev_err(DEV, "kmalloc(w) failed.\n");
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005172 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005173 drbd_resync_finished(mdev);
5174 }
5175 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005176 put_ldev(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005177 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005178}
5179
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005180static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02005181{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005182 return 0;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02005183}
5184
Andreas Gruenbachera990be42011-04-06 17:56:48 +02005185static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005186{
Philipp Reisner082a3432011-03-15 16:05:42 +01005187 struct drbd_conf *mdev;
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005188 int vnr, not_empty = 0;
Philipp Reisner32862ec2011-02-08 16:41:01 +01005189
5190 do {
5191 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5192 flush_signals(current);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005193
5194 rcu_read_lock();
5195 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5196 kref_get(&mdev->kref);
5197 rcu_read_unlock();
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07005198 if (drbd_finish_peer_reqs(mdev)) {
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005199 kref_put(&mdev->kref, &drbd_minor_destroy);
5200 return 1;
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07005201 }
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005202 kref_put(&mdev->kref, &drbd_minor_destroy);
5203 rcu_read_lock();
Philipp Reisner082a3432011-03-15 16:05:42 +01005204 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005205 set_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisner082a3432011-03-15 16:05:42 +01005206
5207 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005208 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Philipp Reisner082a3432011-03-15 16:05:42 +01005209 not_empty = !list_empty(&mdev->done_ee);
5210 if (not_empty)
5211 break;
5212 }
5213 spin_unlock_irq(&tconn->req_lock);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005214 rcu_read_unlock();
Philipp Reisner32862ec2011-02-08 16:41:01 +01005215 } while (not_empty);
5216
5217 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005218}
5219
5220struct asender_cmd {
5221 size_t pkt_size;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005222 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005223};
5224
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005225static struct asender_cmd asender_tbl[] = {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005226 [P_PING] = { 0, got_Ping },
5227 [P_PING_ACK] = { 0, got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005228 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5229 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5230 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02005231 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005232 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5233 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005234 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005235 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5236 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5237 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5238 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02005239 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005240 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5241 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5242 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005243};
Philipp Reisnerb411b362009-09-25 16:07:19 -07005244
5245int drbd_asender(struct drbd_thread *thi)
5246{
Philipp Reisner392c8802011-02-09 10:33:31 +01005247 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005248 struct asender_cmd *cmd = NULL;
Philipp Reisner77351055b2011-02-07 17:24:26 +01005249 struct packet_info pi;
Philipp Reisner257d0af2011-01-26 12:15:29 +01005250 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005251 void *buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005252 int received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005253 unsigned int header_size = drbd_header_size(tconn);
5254 int expect = header_size;
Philipp Reisner44ed1672011-04-19 17:10:19 +02005255 bool ping_timeout_active = false;
5256 struct net_conf *nc;
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005257 int ping_timeo, tcp_cork, ping_int;
Philipp Reisner3990e042013-03-27 14:08:48 +01005258 struct sched_param param = { .sched_priority = 2 };
Philipp Reisnerb411b362009-09-25 16:07:19 -07005259
Philipp Reisner3990e042013-03-27 14:08:48 +01005260 rv = sched_setscheduler(current, SCHED_RR, &param);
5261 if (rv < 0)
5262 conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005263
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01005264 while (get_t_state(thi) == RUNNING) {
Philipp Reisner80822282011-02-08 12:46:30 +01005265 drbd_thread_current_set_cpu(thi);
Philipp Reisner44ed1672011-04-19 17:10:19 +02005266
5267 rcu_read_lock();
5268 nc = rcu_dereference(tconn->net_conf);
5269 ping_timeo = nc->ping_timeo;
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005270 tcp_cork = nc->tcp_cork;
Philipp Reisner44ed1672011-04-19 17:10:19 +02005271 ping_int = nc->ping_int;
5272 rcu_read_unlock();
5273
Philipp Reisner32862ec2011-02-08 16:41:01 +01005274 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
Andreas Gruenbachera17647a2011-04-01 12:49:42 +02005275 if (drbd_send_ping(tconn)) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005276 conn_err(tconn, "drbd_send_ping has failed\n");
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01005277 goto reconnect;
5278 }
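			/* ping_timeo is configured in tenths of a second
			 * (hence the HZ / 10); use this shorter receive
			 * timeout until the PingAck arrives. */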
Philipp Reisner44ed1672011-04-19 17:10:19 +02005279 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5280 ping_timeout_active = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005281 }
5282
Philipp Reisner32862ec2011-02-08 16:41:01 +01005283 /* TODO: conditionally cork; it may hurt latency if we cork without
5284 much to send */
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005285 if (tcp_cork)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005286 drbd_tcp_cork(tconn->meta.socket);
Andreas Gruenbachera990be42011-04-06 17:56:48 +02005287 if (tconn_finish_peer_reqs(tconn)) {
5288 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
Philipp Reisner32862ec2011-02-08 16:41:01 +01005289 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005290 }
5291 /* but unconditionally uncork unless disabled */
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005292 if (tcp_cork)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005293 drbd_tcp_uncork(tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005294
5295 /* short circuit, recv_msg would return EINTR anyways. */
5296 if (signal_pending(current))
5297 continue;
5298
Philipp Reisner32862ec2011-02-08 16:41:01 +01005299 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5300 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005301
5302 flush_signals(current);
5303
5304 /* Note:
5305 * -EINTR (on meta) we got a signal
5306 * -EAGAIN (on meta) rcvtimeo expired
5307 * -ECONNRESET other side closed the connection
5308 * -ERESTARTSYS (on data) we got a signal
5309 * rv < 0 other than above: unexpected error!
5310 * rv == expected: full header or command
5311 * rv < expected: "woken" by signal during receive
5312 * rv == 0 : "connection shut down by peer"
5313 */
5314 if (likely(rv > 0)) {
5315 received += rv;
5316 buf += rv;
5317 } else if (rv == 0) {
Philipp Reisnerb66623e2012-08-08 21:19:09 +02005318 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5319 long t;
5320 rcu_read_lock();
5321 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5322 rcu_read_unlock();
5323
5324 t = wait_event_timeout(tconn->ping_wait,
5325 tconn->cstate < C_WF_REPORT_PARAMS,
5326 t);
Philipp Reisner599377a2012-08-17 14:50:22 +02005327 if (t)
5328 break;
5329 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005330 conn_err(tconn, "meta connection shut down by peer.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005331 goto reconnect;
5332 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02005333 /* If the data socket received something meanwhile,
5334 * that is good enough: peer is still alive. */
Philipp Reisner32862ec2011-02-08 16:41:01 +01005335 if (time_after(tconn->last_received,
5336 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02005337 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01005338 if (ping_timeout_active) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005339 conn_err(tconn, "PingAck did not arrive in time.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005340 goto reconnect;
5341 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005342 set_bit(SEND_PING, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005343 continue;
5344 } else if (rv == -EINTR) {
5345 continue;
5346 } else {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005347 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005348 goto reconnect;
5349 }
5350
5351 if (received == expect && cmd == NULL) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005352 if (decode_header(tconn, tconn->meta.rbuf, &pi))
Philipp Reisnerb411b362009-09-25 16:07:19 -07005353 goto reconnect;
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005354 cmd = &asender_tbl[pi.cmd];
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005355 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02005356 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5357 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005358 goto disconnect;
5359 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005360 expect = header_size + cmd->pkt_size;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005361 if (pi.size != expect - header_size) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005362 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01005363 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005364 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01005365 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005366 }
5367 if (received == expect) {
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005368 bool err;
Philipp Reisnera4fbda82011-03-16 11:13:17 +01005369
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005370 err = cmd->fn(tconn, &pi);
5371 if (err) {
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005372 conn_err(tconn, "%pf failed\n", cmd->fn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005373 goto reconnect;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005374 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005375
Philipp Reisnera4fbda82011-03-16 11:13:17 +01005376 tconn->last_received = jiffies;
Lars Ellenbergf36af182011-03-09 22:44:55 +01005377
Philipp Reisner44ed1672011-04-19 17:10:19 +02005378 if (cmd == &asender_tbl[P_PING_ACK]) {
5379 /* restore idle timeout */
5380 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5381 ping_timeout_active = false;
5382 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005383
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005384 buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005385 received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005386 expect = header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005387 cmd = NULL;
5388 }
5389 }
5390
5391 if (0) {
5392reconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01005393 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Philipp Reisner19fffd72012-08-28 16:48:03 +02005394 conn_md_sync(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005395 }
5396 if (0) {
5397disconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01005398 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005399 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005400 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005401
Philipp Reisner32862ec2011-02-08 16:41:01 +01005402 conn_info(tconn, "asender terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005403
5404 return 0;
5405}