/*
 * Generic SCSI-3 ALUA SCSI Device Handler
 *
 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>

#define ALUA_DH_NAME "alua"
#define ALUA_DH_VER "2.0"

#define TPGS_SUPPORT_NONE 0x00
#define TPGS_SUPPORT_OPTIMIZED 0x01
#define TPGS_SUPPORT_NONOPTIMIZED 0x02
#define TPGS_SUPPORT_STANDBY 0x04
#define TPGS_SUPPORT_UNAVAILABLE 0x08
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80

#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10

#define TPGS_MODE_UNINITIALIZED -1
#define TPGS_MODE_NONE 0x0
#define TPGS_MODE_IMPLICIT 0x1
#define TPGS_MODE_EXPLICIT 0x2

#define ALUA_RTPG_SIZE 128
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
#define ALUA_RTPG_DELAY_MSECS 5

/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
#define ALUA_SYNC_STPG 0x04
/* State machine flags */
#define ALUA_PG_RUN_RTPG 0x10
#define ALUA_PG_RUN_STPG 0x20
#define ALUA_PG_RUNNING 0x40

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
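/*
 * Usage sketch (assuming the handler is built as the scsi_dh_alua module):
 * with S_IRUGO|S_IWUSR the parameter is normally exposed under
 * /sys/module/scsi_dh_alua/parameters/optimize_stpg and writable by root,
 * e.g.
 *
 *     echo 1 > /sys/module/scsi_dh_alua/parameters/optimize_stpg
 *
 * or set at load time, e.g. "modprobe scsi_dh_alua optimize_stpg=1".
 */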

static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;
static struct workqueue_struct *kaluad_sync_wq;

struct alua_port_group {
        struct kref kref;
        struct rcu_head rcu;
        struct list_head node;
        struct list_head dh_list;
        unsigned char device_id_str[256];
        int device_id_len;
        int group_id;
        int tpgs;
        int state;
        int pref;
        unsigned flags; /* used for optimizing STPG */
        unsigned char transition_tmo;
        unsigned long expiry;
        unsigned long interval;
        struct delayed_work rtpg_work;
        spinlock_t lock;
        struct list_head rtpg_list;
        struct scsi_device *rtpg_sdev;
};

struct alua_dh_data {
        struct list_head node;
        struct alua_port_group *pg;
        int group_id;
        spinlock_t pg_lock;
        struct scsi_device *sdev;
        int init_error;
        struct mutex init_mutex;
};

struct alua_queue_data {
        struct list_head entry;
        activate_complete callback_fn;
        void *callback_data;
};
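
/*
 * Overview of the data model above: each attached scsi_device owns one
 * alua_dh_data, whose pg pointer is an RCU-protected reference to a shared,
 * kref-counted alua_port_group (matched by device id and group id).
 * Activation requests are queued on pg->rtpg_list as alua_queue_data
 * entries and their callbacks are invoked from the deferred rtpg worker.
 */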

#define ALUA_POLICY_SWITCH_CURRENT 0
#define ALUA_POLICY_SWITCH_ALL 1

static void alua_rtpg_work(struct work_struct *work);
static void alua_rtpg_queue(struct alua_port_group *pg,
                            struct scsi_device *sdev,
                            struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);

static void release_port_group(struct kref *kref)
{
        struct alua_port_group *pg;

        pg = container_of(kref, struct alua_port_group, kref);
        if (pg->rtpg_sdev)
                flush_delayed_work(&pg->rtpg_work);
        spin_lock(&port_group_lock);
        list_del(&pg->node);
        spin_unlock(&port_group_lock);
        kfree_rcu(pg, rcu);
}
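
/*
 * Lifetime note: h->pg is dereferenced under rcu_read_lock() throughout this
 * file, so the final kref_put() ends up in kfree_rcu() above and the memory
 * is only reclaimed once any concurrent RCU readers are done.
 */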

/*
 * submit_rtpg - Issue a REPORT TARGET PORT GROUPS command
 * @sdev: sdev the command should be sent to
 */
static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
                       int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
        u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)];
        int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                REQ_FAILFAST_DRIVER;

        /* Prepare the command. */
        memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN));
        cdb[0] = MAINTENANCE_IN;
        if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
                cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
        else
                cdb[1] = MI_REPORT_TARGET_PGS;
        put_unaligned_be32(bufflen, &cdb[6]);

        return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
                                      buff, bufflen, sshdr,
                                      ALUA_FAILOVER_TIMEOUT * HZ,
                                      ALUA_FAILOVER_RETRIES, NULL,
                                      req_flags, 0);
}
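
/*
 * CDB sketch for the REPORT TARGET PORT GROUPS command built above (per
 * SPC): byte 0 carries the MAINTENANCE IN opcode, byte 1 the RTPG service
 * action plus, unless the array already rejected it, the extended-header
 * parameter format bit, and bytes 6-9 the big-endian allocation length.
 */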

/*
 * submit_stpg - Issue a SET TARGET PORT GROUPS command
 *
 * Currently we only set the current target port group state
 * to 'active/optimized', leaving the array firmware to figure out
 * the states of the remaining groups.
 */
static int submit_stpg(struct scsi_device *sdev, int group_id,
                       struct scsi_sense_hdr *sshdr)
{
        u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)];
        unsigned char stpg_data[8];
        int stpg_len = 8;
        int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                REQ_FAILFAST_DRIVER;

        /* Prepare the data buffer */
        memset(stpg_data, 0, stpg_len);
        stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
        put_unaligned_be16(group_id, &stpg_data[6]);

        /* Prepare the command. */
        memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT));
        cdb[0] = MAINTENANCE_OUT;
        cdb[1] = MO_SET_TARGET_PGS;
        put_unaligned_be32(stpg_len, &cdb[6]);

        return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
                                      stpg_data, stpg_len,
                                      sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
                                      ALUA_FAILOVER_RETRIES, NULL,
                                      req_flags, 0);
}
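
/*
 * Parameter data sketch for the STPG built above: of the 8-byte buffer,
 * byte 4 holds the requested asymmetric access state (active/optimized)
 * and bytes 6-7 the big-endian target port group id; everything else stays
 * zero. This corresponds to a 4-byte header followed by a single set
 * target port group descriptor.
 */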

static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
                                                int group_id)
{
        struct alua_port_group *pg;

        if (!id_str || !id_size || !strlen(id_str))
                return NULL;

        list_for_each_entry(pg, &port_group_list, node) {
                if (pg->group_id != group_id)
                        continue;
                if (!pg->device_id_len || pg->device_id_len != id_size)
                        continue;
                if (strncmp(pg->device_id_str, id_str, id_size))
                        continue;
                if (!kref_get_unless_zero(&pg->kref))
                        continue;
                return pg;
        }

        return NULL;
}

/*
 * alua_alloc_pg - Allocate a new port_group structure
 * @sdev: scsi device
 * @group_id: port group id
 * @tpgs: target port group settings
 *
 * Allocate a new port_group structure for a given
 * device.
 */
static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
                                             int group_id, int tpgs)
{
        struct alua_port_group *pg, *tmp_pg;

        pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
        if (!pg)
                return ERR_PTR(-ENOMEM);

        pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
                                            sizeof(pg->device_id_str));
        if (pg->device_id_len <= 0) {
                /*
                 * TPGS supported but no device identification found.
                 * Generate private device identification.
                 */
                sdev_printk(KERN_INFO, sdev,
                            "%s: No device descriptors found\n",
                            ALUA_DH_NAME);
                pg->device_id_str[0] = '\0';
                pg->device_id_len = 0;
        }
        pg->group_id = group_id;
        pg->tpgs = tpgs;
        pg->state = SCSI_ACCESS_STATE_OPTIMAL;
        if (optimize_stpg)
                pg->flags |= ALUA_OPTIMIZE_STPG;
        kref_init(&pg->kref);
        INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
        INIT_LIST_HEAD(&pg->rtpg_list);
        INIT_LIST_HEAD(&pg->node);
        INIT_LIST_HEAD(&pg->dh_list);
        spin_lock_init(&pg->lock);

        spin_lock(&port_group_lock);
        tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
                                  group_id);
        if (tmp_pg) {
                spin_unlock(&port_group_lock);
                kfree(pg);
                return tmp_pg;
        }

        list_add(&pg->node, &port_group_list);
        spin_unlock(&port_group_lock);

        return pg;
}

/*
 * alua_check_tpgs - Evaluate TPGS setting
 * @sdev: device to be checked
 *
 * Examine the TPGS setting of the sdev to find out if ALUA
 * is supported.
 */
static int alua_check_tpgs(struct scsi_device *sdev)
{
        int tpgs = TPGS_MODE_NONE;

        /*
         * ALUA support for non-disk devices is fraught with
         * difficulties, so disable it for now.
         */
        if (sdev->type != TYPE_DISK) {
                sdev_printk(KERN_INFO, sdev,
                            "%s: disable for non-disk devices\n",
                            ALUA_DH_NAME);
                return tpgs;
        }

        tpgs = scsi_device_tpgs(sdev);
        switch (tpgs) {
        case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
                sdev_printk(KERN_INFO, sdev,
                            "%s: supports implicit and explicit TPGS\n",
                            ALUA_DH_NAME);
                break;
        case TPGS_MODE_EXPLICIT:
                sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
                            ALUA_DH_NAME);
                break;
        case TPGS_MODE_IMPLICIT:
                sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
                            ALUA_DH_NAME);
                break;
        case TPGS_MODE_NONE:
                sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
                            ALUA_DH_NAME);
                break;
        default:
                sdev_printk(KERN_INFO, sdev,
                            "%s: unsupported TPGS setting %d\n",
                            ALUA_DH_NAME, tpgs);
                tpgs = TPGS_MODE_NONE;
                break;
        }

        return tpgs;
}

/*
 * alua_check_vpd - Evaluate INQUIRY vpd page 0x83
 * @sdev: device to be checked
 *
 * Extract the relative target port and the target port group
 * descriptor from the list of identification descriptors.
 */
static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
                          int tpgs)
{
        int rel_port = -1, group_id;
        struct alua_port_group *pg, *old_pg = NULL;
        bool pg_updated = false;
        unsigned long flags;

        group_id = scsi_vpd_tpg_id(sdev, &rel_port);
        if (group_id < 0) {
                /*
                 * Internal error; TPGS supported but required
                 * VPD identification descriptors not present.
                 * Disable ALUA support
                 */
                sdev_printk(KERN_INFO, sdev,
                            "%s: No target port descriptors found\n",
                            ALUA_DH_NAME);
                return SCSI_DH_DEV_UNSUPP;
        }

        pg = alua_alloc_pg(sdev, group_id, tpgs);
        if (IS_ERR(pg)) {
                if (PTR_ERR(pg) == -ENOMEM)
                        return SCSI_DH_NOMEM;
                return SCSI_DH_DEV_UNSUPP;
        }
        if (pg->device_id_len)
                sdev_printk(KERN_INFO, sdev,
                            "%s: device %s port group %x rel port %x\n",
                            ALUA_DH_NAME, pg->device_id_str,
                            group_id, rel_port);
        else
                sdev_printk(KERN_INFO, sdev,
                            "%s: port group %x rel port %x\n",
                            ALUA_DH_NAME, group_id, rel_port);

        /* Check for existing port group references */
        spin_lock(&h->pg_lock);
        old_pg = h->pg;
        if (old_pg != pg) {
                /* port group has changed. Update to new port group */
                if (h->pg) {
                        spin_lock_irqsave(&old_pg->lock, flags);
                        list_del_rcu(&h->node);
                        spin_unlock_irqrestore(&old_pg->lock, flags);
                }
                rcu_assign_pointer(h->pg, pg);
                pg_updated = true;
        }

        spin_lock_irqsave(&pg->lock, flags);
        if (sdev->synchronous_alua)
                pg->flags |= ALUA_SYNC_STPG;
        if (pg_updated)
                list_add_rcu(&h->node, &pg->dh_list);
        spin_unlock_irqrestore(&pg->lock, flags);

        alua_rtpg_queue(h->pg, sdev, NULL, true);
        spin_unlock(&h->pg_lock);

        if (old_pg)
                kref_put(&old_pg->kref, release_port_group);

        return SCSI_DH_OK;
}

static char print_alua_state(unsigned char state)
{
        switch (state) {
        case SCSI_ACCESS_STATE_OPTIMAL:
                return 'A';
        case SCSI_ACCESS_STATE_ACTIVE:
                return 'N';
        case SCSI_ACCESS_STATE_STANDBY:
                return 'S';
        case SCSI_ACCESS_STATE_UNAVAILABLE:
                return 'U';
        case SCSI_ACCESS_STATE_LBA:
                return 'L';
        case SCSI_ACCESS_STATE_OFFLINE:
                return 'O';
        case SCSI_ACCESS_STATE_TRANSITIONING:
                return 'T';
        default:
                return 'X';
        }
}

static int alua_check_sense(struct scsi_device *sdev,
                            struct scsi_sense_hdr *sense_hdr)
{
        switch (sense_hdr->sense_key) {
        case NOT_READY:
                if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
                        /*
                         * LUN Not Accessible - ALUA state transition
                         */
                        alua_check(sdev, false);
                        return NEEDS_RETRY;
                }
                break;
        case UNIT_ATTENTION:
                if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
                        /*
                         * Power On, Reset, or Bus Device Reset.
                         * Might have obscured a state transition,
                         * so schedule a recheck.
                         */
                        alua_check(sdev, true);
                        return ADD_TO_MLQUEUE;
                }
                if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
                        /*
                         * Device internal reset
                         */
                        return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
                        /*
                         * Mode Parameters Changed
                         */
                        return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
                        /*
                         * ALUA state changed
                         */
                        alua_check(sdev, true);
                        return ADD_TO_MLQUEUE;
                }
                if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
                        /*
                         * Implicit ALUA state transition failed
                         */
                        alua_check(sdev, true);
                        return ADD_TO_MLQUEUE;
                }
                if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
                        /*
                         * Inquiry data has changed
                         */
                        return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
                        /*
                         * REPORTED_LUNS_DATA_HAS_CHANGED is reported
                         * when switching controllers on targets like
                         * Intel Multi-Flex. We can just retry.
                         */
                        return ADD_TO_MLQUEUE;
                break;
        }

        return SCSI_RETURN_NOT_HANDLED;
}
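
/*
 * Rough meaning of the scsi_eh dispositions returned above: NEEDS_RETRY asks
 * the error-handling path to retry the command, ADD_TO_MLQUEUE requeues it
 * on the midlayer queue, and SCSI_RETURN_NOT_HANDLED leaves the decision to
 * the normal completion logic.
 */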

/*
 * alua_tur - Send a TEST UNIT READY
 * @sdev: device to which the TEST UNIT READY command should be sent
 *
 * Send a TEST UNIT READY to @sdev to figure out the device state.
 * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
 * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
 */
static int alua_tur(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sense_hdr;
        int retval;

        retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
                                      ALUA_FAILOVER_RETRIES, &sense_hdr);
        if (sense_hdr.sense_key == NOT_READY &&
            sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
                return SCSI_DH_RETRY;
        else if (retval)
                return SCSI_DH_IO;
        else
                return SCSI_DH_OK;
}
/*
 * alua_rtpg - Evaluate REPORT TARGET PORT GROUPS
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
        struct scsi_sense_hdr sense_hdr;
        struct alua_port_group *tmp_pg;
        int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
        unsigned char *desc, *buff;
        unsigned err, retval;
        unsigned int tpg_desc_tbl_off;
        unsigned char orig_transition_tmo;
        unsigned long flags;

        if (!pg->expiry) {
                unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

                if (pg->transition_tmo)
                        transition_tmo = pg->transition_tmo * HZ;

                pg->expiry = round_jiffies_up(jiffies + transition_tmo);
        }

        buff = kzalloc(bufflen, GFP_KERNEL);
        if (!buff)
                return SCSI_DH_DEV_TEMP_BUSY;

 retry:
        err = 0;
        retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

        if (retval) {
                if (!scsi_sense_valid(&sense_hdr)) {
                        sdev_printk(KERN_INFO, sdev,
                                    "%s: rtpg failed, result %d\n",
                                    ALUA_DH_NAME, retval);
                        kfree(buff);
                        if (driver_byte(retval) == DRIVER_ERROR)
                                return SCSI_DH_DEV_TEMP_BUSY;
                        return SCSI_DH_IO;
                }

                /*
                 * submit_rtpg() has failed on existing arrays
                 * when requesting extended header info, and
                 * the array doesn't support extended headers,
                 * even though it shouldn't according to T10.
                 * The retry without rtpg_ext_hdr_req set
                 * handles this.
                 */
                if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
                    sense_hdr.sense_key == ILLEGAL_REQUEST &&
                    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
                        pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
                        goto retry;
                }
                /*
                 * Retry on ALUA state transition or if any
                 * UNIT ATTENTION occurred.
                 */
                if (sense_hdr.sense_key == NOT_READY &&
                    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
                        err = SCSI_DH_RETRY;
                else if (sense_hdr.sense_key == UNIT_ATTENTION)
                        err = SCSI_DH_RETRY;
                if (err == SCSI_DH_RETRY &&
                    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
                        sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
                                    ALUA_DH_NAME);
                        scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
                        kfree(buff);
                        return err;
                }
                sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
                            ALUA_DH_NAME);
                scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
                kfree(buff);
                pg->expiry = 0;
                return SCSI_DH_IO;
        }

        len = get_unaligned_be32(&buff[0]) + 4;

        if (len > bufflen) {
                /* Resubmit with the correct length */
                kfree(buff);
                bufflen = len;
                buff = kmalloc(bufflen, GFP_KERNEL);
                if (!buff) {
                        sdev_printk(KERN_WARNING, sdev,
                                    "%s: kmalloc buffer failed\n", __func__);
                        /* Temporary failure, bypass */
                        pg->expiry = 0;
                        return SCSI_DH_DEV_TEMP_BUSY;
                }
                goto retry;
        }

        orig_transition_tmo = pg->transition_tmo;
        if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
                pg->transition_tmo = buff[5];
        else
                pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

        if (orig_transition_tmo != pg->transition_tmo) {
                sdev_printk(KERN_INFO, sdev,
                            "%s: transition timeout set to %d seconds\n",
                            ALUA_DH_NAME, pg->transition_tmo);
                pg->expiry = jiffies + pg->transition_tmo * HZ;
        }

        if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
                tpg_desc_tbl_off = 8;
        else
                tpg_desc_tbl_off = 4;

        for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
             k < len;
             k += off, desc += off) {
                u16 group_id = get_unaligned_be16(&desc[2]);

                spin_lock_irqsave(&port_group_lock, flags);
                tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
                                          group_id);
                spin_unlock_irqrestore(&port_group_lock, flags);
                if (tmp_pg) {
                        if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
                                if ((tmp_pg == pg) ||
                                    !(tmp_pg->flags & ALUA_PG_RUNNING)) {
                                        struct alua_dh_data *h;

                                        tmp_pg->state = desc[0] & 0x0f;
                                        tmp_pg->pref = desc[0] >> 7;
                                        rcu_read_lock();
                                        list_for_each_entry_rcu(h,
                                                &tmp_pg->dh_list, node) {
                                                /* h->sdev should always be valid */
                                                BUG_ON(!h->sdev);
                                                h->sdev->access_state = desc[0];
                                        }
                                        rcu_read_unlock();
                                }
                                if (tmp_pg == pg)
                                        valid_states = desc[1];
                                spin_unlock_irqrestore(&tmp_pg->lock, flags);
                        }
                        kref_put(&tmp_pg->kref, release_port_group);
                }
                off = 8 + (desc[7] * 4);
        }

        spin_lock_irqsave(&pg->lock, flags);
        sdev_printk(KERN_INFO, sdev,
                    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
                    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
                    pg->pref ? "preferred" : "non-preferred",
                    valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
                    valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
                    valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
                    valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
                    valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
                    valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
                    valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

        switch (pg->state) {
        case SCSI_ACCESS_STATE_TRANSITIONING:
                if (time_before(jiffies, pg->expiry)) {
                        /* State transition, retry */
                        pg->interval = 2;
                        err = SCSI_DH_RETRY;
                } else {
                        struct alua_dh_data *h;

                        /* Transitioning time exceeded, set port to standby */
                        err = SCSI_DH_IO;
                        pg->state = SCSI_ACCESS_STATE_STANDBY;
                        pg->expiry = 0;
                        rcu_read_lock();
                        list_for_each_entry_rcu(h, &pg->dh_list, node) {
                                BUG_ON(!h->sdev);
                                h->sdev->access_state =
                                        (pg->state & SCSI_ACCESS_STATE_MASK);
                                if (pg->pref)
                                        h->sdev->access_state |=
                                                SCSI_ACCESS_STATE_PREFERRED;
                        }
                        rcu_read_unlock();
                }
                break;
        case SCSI_ACCESS_STATE_OFFLINE:
                /* Path unusable */
                err = SCSI_DH_DEV_OFFLINED;
                pg->expiry = 0;
                break;
        default:
                /* Usable path if active */
                err = SCSI_DH_OK;
                pg->expiry = 0;
                break;
        }
        spin_unlock_irqrestore(&pg->lock, flags);
        kfree(buff);
        return err;
}
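
/*
 * Layout of the RTPG response as parsed above: bytes 0-3 hold the big-endian
 * length of the descriptor data that follows, byte 4 the format field
 * (RTPG_FMT_EXT_HDR means an extended header is present and byte 5 then
 * carries the implicit transition timeout), and each target port group
 * descriptor occupies 8 bytes plus 4 bytes per target port (hence
 * off = 8 + desc[7] * 4), with the ALUA state in desc[0], the supported
 * states bitmap in desc[1] and the group id in desc[2..3].
 */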

/*
 * alua_stpg - Issue a SET TARGET PORT GROUPS command
 *
 * Issue a SET TARGET PORT GROUPS command and evaluate the
 * response. Returns SCSI_DH_RETRY by default to trigger
 * a re-evaluation of the target group state, or SCSI_DH_OK
 * if no further action needs to be taken.
 */
static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
        int retval;
        struct scsi_sense_hdr sense_hdr;

        if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
                /* Only implicit ALUA supported, retry */
                return SCSI_DH_RETRY;
        }
        switch (pg->state) {
        case SCSI_ACCESS_STATE_OPTIMAL:
                return SCSI_DH_OK;
        case SCSI_ACCESS_STATE_ACTIVE:
                if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
                    !pg->pref &&
                    (pg->tpgs & TPGS_MODE_IMPLICIT))
                        return SCSI_DH_OK;
                break;
        case SCSI_ACCESS_STATE_STANDBY:
        case SCSI_ACCESS_STATE_UNAVAILABLE:
                break;
        case SCSI_ACCESS_STATE_OFFLINE:
                return SCSI_DH_IO;
        case SCSI_ACCESS_STATE_TRANSITIONING:
                break;
        default:
                sdev_printk(KERN_INFO, sdev,
                            "%s: stpg failed, unhandled TPGS state %d",
                            ALUA_DH_NAME, pg->state);
                return SCSI_DH_NOSYS;
        }
        retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

        if (retval) {
                if (!scsi_sense_valid(&sense_hdr)) {
                        sdev_printk(KERN_INFO, sdev,
                                    "%s: stpg failed, result %d",
                                    ALUA_DH_NAME, retval);
                        if (driver_byte(retval) == DRIVER_ERROR)
                                return SCSI_DH_DEV_TEMP_BUSY;
                } else {
                        sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
                                    ALUA_DH_NAME);
                        scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
                }
        }
        /* Retry RTPG */
        return SCSI_DH_RETRY;
}

/*
 * alua_rtpg_work - Deferred worker driving the RTPG/STPG state machine
 * @work: delayed work embedded in the port group to be evaluated
 */
static void alua_rtpg_work(struct work_struct *work)
{
        struct alua_port_group *pg =
                container_of(work, struct alua_port_group, rtpg_work.work);
        struct scsi_device *sdev;
        LIST_HEAD(qdata_list);
        int err = SCSI_DH_OK;
        struct alua_queue_data *qdata, *tmp;
        unsigned long flags;
        struct workqueue_struct *alua_wq = kaluad_wq;

        spin_lock_irqsave(&pg->lock, flags);
        sdev = pg->rtpg_sdev;
        if (!sdev) {
                WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
                WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
                spin_unlock_irqrestore(&pg->lock, flags);
                return;
        }
        if (pg->flags & ALUA_SYNC_STPG)
                alua_wq = kaluad_sync_wq;
        pg->flags |= ALUA_PG_RUNNING;
        if (pg->flags & ALUA_PG_RUN_RTPG) {
                int state = pg->state;

                pg->flags &= ~ALUA_PG_RUN_RTPG;
                spin_unlock_irqrestore(&pg->lock, flags);
                if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
                        if (alua_tur(sdev) == SCSI_DH_RETRY) {
                                spin_lock_irqsave(&pg->lock, flags);
                                pg->flags &= ~ALUA_PG_RUNNING;
                                pg->flags |= ALUA_PG_RUN_RTPG;
                                spin_unlock_irqrestore(&pg->lock, flags);
                                queue_delayed_work(alua_wq, &pg->rtpg_work,
                                                   pg->interval * HZ);
                                return;
                        }
                        /* Send RTPG on failure or if TUR indicates SUCCESS */
                }
                err = alua_rtpg(sdev, pg);
                spin_lock_irqsave(&pg->lock, flags);
                if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
                        pg->flags &= ~ALUA_PG_RUNNING;
                        pg->flags |= ALUA_PG_RUN_RTPG;
                        spin_unlock_irqrestore(&pg->lock, flags);
                        queue_delayed_work(alua_wq, &pg->rtpg_work,
                                           pg->interval * HZ);
                        return;
                }
                if (err != SCSI_DH_OK)
                        pg->flags &= ~ALUA_PG_RUN_STPG;
        }
        if (pg->flags & ALUA_PG_RUN_STPG) {
                pg->flags &= ~ALUA_PG_RUN_STPG;
                spin_unlock_irqrestore(&pg->lock, flags);
                err = alua_stpg(sdev, pg);
                spin_lock_irqsave(&pg->lock, flags);
                if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
                        pg->flags |= ALUA_PG_RUN_RTPG;
                        pg->interval = 0;
                        pg->flags &= ~ALUA_PG_RUNNING;
                        spin_unlock_irqrestore(&pg->lock, flags);
                        queue_delayed_work(alua_wq, &pg->rtpg_work,
                                           pg->interval * HZ);
                        return;
                }
        }

        list_splice_init(&pg->rtpg_list, &qdata_list);
        pg->rtpg_sdev = NULL;
        spin_unlock_irqrestore(&pg->lock, flags);

        list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
                list_del(&qdata->entry);
                if (qdata->callback_fn)
                        qdata->callback_fn(qdata->callback_data, err);
                kfree(qdata);
        }
        spin_lock_irqsave(&pg->lock, flags);
        pg->flags &= ~ALUA_PG_RUNNING;
        spin_unlock_irqrestore(&pg->lock, flags);
        scsi_device_put(sdev);
        kref_put(&pg->kref, release_port_group);
}

static void alua_rtpg_queue(struct alua_port_group *pg,
                            struct scsi_device *sdev,
                            struct alua_queue_data *qdata, bool force)
{
        int start_queue = 0;
        unsigned long flags;
        struct workqueue_struct *alua_wq = kaluad_wq;

        if (!pg)
                return;

        spin_lock_irqsave(&pg->lock, flags);
        if (qdata) {
                list_add_tail(&qdata->entry, &pg->rtpg_list);
                pg->flags |= ALUA_PG_RUN_STPG;
                force = true;
        }
        if (pg->rtpg_sdev == NULL) {
                pg->interval = 0;
                pg->flags |= ALUA_PG_RUN_RTPG;
                kref_get(&pg->kref);
                pg->rtpg_sdev = sdev;
                scsi_device_get(sdev);
                start_queue = 1;
        } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
                pg->flags |= ALUA_PG_RUN_RTPG;
                /* Do not queue if the worker is already running */
                if (!(pg->flags & ALUA_PG_RUNNING)) {
                        kref_get(&pg->kref);
                        start_queue = 1;
                }
        }

        if (pg->flags & ALUA_SYNC_STPG)
                alua_wq = kaluad_sync_wq;
        spin_unlock_irqrestore(&pg->lock, flags);

        if (start_queue &&
            !queue_delayed_work(alua_wq, &pg->rtpg_work,
                                msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
                scsi_device_put(sdev);
                kref_put(&pg->kref, release_port_group);
        }
}
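
/*
 * Reference ownership note: when alua_rtpg_queue() schedules the worker it
 * holds a port group kref on its behalf, plus a scsi_device reference when
 * it installs pg->rtpg_sdev; alua_rtpg_work() drops these when it finishes,
 * and the branch above releases them immediately if queue_delayed_work()
 * refuses the work item.
 */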

/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
        int err = SCSI_DH_DEV_UNSUPP, tpgs;

        mutex_lock(&h->init_mutex);
        tpgs = alua_check_tpgs(sdev);
        if (tpgs != TPGS_MODE_NONE)
                err = alua_check_vpd(sdev, h, tpgs);
        h->init_error = err;
        mutex_unlock(&h->init_mutex);
        return err;
}
/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * @params: parameters in the following format
 *          "no_of_params\0param1\0param2\0param3\0...\0"
 *
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 *     hardware_handler "2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
        struct alua_dh_data *h = sdev->handler_data;
        struct alua_port_group __rcu *pg = NULL;
        unsigned int optimize = 0, argc;
        const char *p = params;
        int result = SCSI_DH_OK;
        unsigned long flags;

        if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
                return -EINVAL;

        while (*p++)
                ;
        if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
                return -EINVAL;

        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (!pg) {
                rcu_read_unlock();
                return -ENXIO;
        }
        spin_lock_irqsave(&pg->lock, flags);
        if (optimize)
                pg->flags |= ALUA_OPTIMIZE_STPG;
        else
                pg->flags &= ~ALUA_OPTIMIZE_STPG;
        spin_unlock_irqrestore(&pg->lock, flags);
        rcu_read_unlock();

        return result;
}

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We currently switch only the port group to be activated and let
 * the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
                         activate_complete fn, void *data)
{
        struct alua_dh_data *h = sdev->handler_data;
        int err = SCSI_DH_OK;
        struct alua_queue_data *qdata;
        struct alua_port_group __rcu *pg;

        qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
        if (!qdata) {
                err = SCSI_DH_RES_TEMP_UNAVAIL;
                goto out;
        }
        qdata->callback_fn = fn;
        qdata->callback_data = data;

        mutex_lock(&h->init_mutex);
        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (!pg || !kref_get_unless_zero(&pg->kref)) {
                rcu_read_unlock();
                kfree(qdata);
                err = h->init_error;
                mutex_unlock(&h->init_mutex);
                goto out;
        }
        fn = NULL;
        rcu_read_unlock();
        mutex_unlock(&h->init_mutex);

        alua_rtpg_queue(pg, sdev, qdata, true);
        kref_put(&pg->kref, release_port_group);
out:
        if (fn)
                fn(data, err);
        return 0;
}

/*
 * alua_check - check path status
 * @sdev: device on the path to be checked
 *
 * Check the device status
 */
static void alua_check(struct scsi_device *sdev, bool force)
{
        struct alua_dh_data *h = sdev->handler_data;
        struct alua_port_group *pg;

        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (!pg || !kref_get_unless_zero(&pg->kref)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        alua_rtpg_queue(pg, sdev, NULL, force);
        kref_put(&pg->kref, release_port_group);
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized, active/non-optimized or lba-dependent.
 */
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
        struct alua_dh_data *h = sdev->handler_data;
        struct alua_port_group __rcu *pg;
        unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
        int ret = BLKPREP_OK;

        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (pg)
                state = pg->state;
        rcu_read_unlock();
        if (state == SCSI_ACCESS_STATE_TRANSITIONING)
                ret = BLKPREP_DEFER;
        else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
                 state != SCSI_ACCESS_STATE_ACTIVE &&
                 state != SCSI_ACCESS_STATE_LBA) {
                ret = BLKPREP_KILL;
                req->rq_flags |= RQF_QUIET;
        }
        return ret;
}
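
/*
 * Sketch of the block-layer semantics relied on above: BLKPREP_OK lets the
 * request proceed, BLKPREP_DEFER leaves it on the queue to be retried later
 * (used while the port group is transitioning), and BLKPREP_KILL fails the
 * request outright, with RQF_QUIET suppressing the resulting error noise.
 */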

static void alua_rescan(struct scsi_device *sdev)
{
        struct alua_dh_data *h = sdev->handler_data;

        alua_initialize(sdev, h);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static int alua_bus_attach(struct scsi_device *sdev)
{
        struct alua_dh_data *h;
        int err, ret = -EINVAL;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return -ENOMEM;
        spin_lock_init(&h->pg_lock);
        rcu_assign_pointer(h->pg, NULL);
        h->init_error = SCSI_DH_OK;
        h->sdev = sdev;
        INIT_LIST_HEAD(&h->node);

        mutex_init(&h->init_mutex);
        err = alua_initialize(sdev, h);
        if (err == SCSI_DH_NOMEM)
                ret = -ENOMEM;
        if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
                goto failed;

        sdev->handler_data = h;
        return 0;
failed:
        kfree(h);
        return ret;
}

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
        struct alua_dh_data *h = sdev->handler_data;
        struct alua_port_group *pg;

        spin_lock(&h->pg_lock);
        pg = h->pg;
        rcu_assign_pointer(h->pg, NULL);
        h->sdev = NULL;
        spin_unlock(&h->pg_lock);
        if (pg) {
                spin_lock_irq(&pg->lock);
                list_del_rcu(&h->node);
                spin_unlock_irq(&pg->lock);
                kref_put(&pg->kref, release_port_group);
        }
        sdev->handler_data = NULL;
        kfree(h);
}

static struct scsi_device_handler alua_dh = {
        .name = ALUA_DH_NAME,
        .module = THIS_MODULE,
        .attach = alua_bus_attach,
        .detach = alua_bus_detach,
        .prep_fn = alua_prep_fn,
        .check_sense = alua_check_sense,
        .activate = alua_activate,
        .rescan = alua_rescan,
        .set_params = alua_set_params,
};

static int __init alua_init(void)
{
        int r;

        kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
        if (!kaluad_wq) {
                /* Temporary failure, bypass */
                return SCSI_DH_DEV_TEMP_BUSY;
        }
        kaluad_sync_wq = create_workqueue("kaluad_sync");
        if (!kaluad_sync_wq) {
                destroy_workqueue(kaluad_wq);
                return SCSI_DH_DEV_TEMP_BUSY;
        }
        r = scsi_register_device_handler(&alua_dh);
        if (r != 0) {
                printk(KERN_ERR "%s: Failed to register scsi device handler",
                        ALUA_DH_NAME);
                destroy_workqueue(kaluad_sync_wq);
                destroy_workqueue(kaluad_wq);
        }
        return r;
}

static void __exit alua_exit(void)
{
        scsi_unregister_device_handler(&alua_dh);
        destroy_workqueue(kaluad_sync_wq);
        destroy_workqueue(kaluad_wq);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);