[SCSI] qla4xxx: Add support to set CHAP entries
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / drivers / scsi / qla4xxx / ql4_os.c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22
23 /*
24 * Driver version
25 */
26 static char qla4xxx_version_str[40];
27
28 /*
29 * SRB allocation cache
30 */
31 static struct kmem_cache *srb_cachep;
32
33 /*
34 * Module parameter information and variables
35 */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 " Set to disable exporting boot targets to sysfs.\n"
40 "\t\t 0 - Export boot targets\n"
41 "\t\t 1 - Do not export boot targets (Default)");
42
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 " Don't reset the HBA for driver recovery.\n"
47 "\t\t 0 - It will reset HBA (Default)\n"
48 "\t\t 1 - It will NOT reset HBA");
49
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 " Option to enable extended error logging.\n"
54 "\t\t 0 - no logging (Default)\n"
55 "\t\t 2 - debug logging");
56
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 "\t\t 0 = enable INTx interrupt mechanism.\n"
62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
63 "\t\t 2 = enable MSI interrupt mechanism.");
64
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 " Maximum queue depth to report for target devices.\n"
70 "\t\t Default: 32.");
71
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 " Enable or disable dynamic tracking and adjustment of\n"
76 "\t\t scsi device queue depth.\n"
77 "\t\t 0 - Disable.\n"
78 "\t\t 1 - Enable. (Default)");
79
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec.");
85
86 int ql4xmdcapmask = 0x1F;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0x1F.\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
92
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 " Set to enable minidump.\n"
97 "\t\t 0 - disable minidump\n"
98 "\t\t 1 - enable minidump (Default)");
99
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102 * SCSI host template entry points
103 */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105
106 /*
107 * iSCSI template entry points
108 */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 enum iscsi_param_type param_type,
119 int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 struct sockaddr *dst_addr,
123 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 struct iscsi_cls_conn *cls_conn,
133 uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 uint32_t iface_type, uint32_t payload_size,
148 uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
153 int len);
154
155 /*
156 * SCSI host template entry points
157 */
158 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
161 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
162 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
163 static int qla4xxx_slave_alloc(struct scsi_device *device);
164 static int qla4xxx_slave_configure(struct scsi_device *device);
165 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
166 static umode_t qla4_attr_is_visible(int param_type, int param);
167 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
168 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
169 int reason);
170
171 /*
172 * iSCSI Flash DDB sysfs entry points
173 */
174 static int
175 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
176 struct iscsi_bus_flash_conn *fnode_conn,
177 void *data, int len);
178 static int
179 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
180 int param, char *buf);
181 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
182 int len);
183 static int
184 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
185 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
186 struct iscsi_bus_flash_conn *fnode_conn);
187 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
188 struct iscsi_bus_flash_conn *fnode_conn);
189 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
190
191 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
192 QLA82XX_LEGACY_INTR_CONFIG;
193
/*
 * SCSI mid-layer host template: queuecommand entry point, error-handler
 * callbacks, slave (scsi_device) hooks and host limits for this driver.
 */
static struct scsi_host_template qla4xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= DRIVER_NAME,
	.proc_name		= DRIVER_NAME,
	.queuecommand		= qla4xxx_queuecommand,

	/* Escalating error-recovery callbacks: abort -> LUN -> target -> host. */
	.eh_abort_handler	= qla4xxx_eh_abort,
	.eh_device_reset_handler = qla4xxx_eh_device_reset,
	.eh_target_reset_handler = qla4xxx_eh_target_reset,
	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,

	.slave_configure	= qla4xxx_slave_configure,
	.slave_alloc		= qla4xxx_slave_alloc,
	.slave_destroy		= qla4xxx_slave_destroy,
	.change_queue_depth	= qla4xxx_change_queue_depth,

	.this_id		= -1,
	.cmd_per_lun		= 3,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= SG_ALL,

	.max_sectors		= 0xFFFF,
	.shost_attrs		= qla4xxx_host_attrs,
	.host_reset		= qla4xxx_host_reset,
	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};
221
/*
 * iSCSI transport template registered with the open-iscsi transport class.
 * Session/connection management, parameter get/set, endpoint handling,
 * CHAP management and flash-DDB sysfs operations are wired up here.
 */
static struct iscsi_transport qla4xxx_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= DRIVER_NAME,
	.caps			= CAP_TEXT_NEGO |
				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
				  CAP_MULTI_R2T,
	.attr_is_visible	= qla4_attr_is_visible,
	.create_session		= qla4xxx_session_create,
	.destroy_session	= qla4xxx_session_destroy,
	.start_conn		= qla4xxx_conn_start,
	.create_conn		= qla4xxx_conn_create,
	.bind_conn		= qla4xxx_conn_bind,
	.stop_conn		= iscsi_conn_stop,
	.destroy_conn		= qla4xxx_conn_destroy,
	.set_param		= iscsi_set_param,
	.get_conn_param		= qla4xxx_conn_get_param,
	.get_session_param	= qla4xxx_session_get_param,
	.get_ep_param		= qla4xxx_get_ep_param,
	.ep_connect		= qla4xxx_ep_connect,
	.ep_poll		= qla4xxx_ep_poll,
	.ep_disconnect		= qla4xxx_ep_disconnect,
	.get_stats		= qla4xxx_conn_get_stats,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= qla4xxx_task_xmit,
	.cleanup_task		= qla4xxx_task_cleanup,
	.alloc_pdu		= qla4xxx_alloc_pdu,

	.get_host_param		= qla4xxx_host_get_param,
	.set_iface_param	= qla4xxx_iface_set_param,
	.get_iface_param	= qla4xxx_get_iface_param,
	.bsg_request		= qla4xxx_bsg_request,
	.send_ping		= qla4xxx_send_ping,
	/* CHAP credential management (flash-backed table). */
	.get_chap		= qla4xxx_get_chap_list,
	.delete_chap		= qla4xxx_delete_chap,
	.set_chap		= qla4xxx_set_chap_entry,
	/* Flash DDB (persistent target entry) sysfs operations. */
	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
	.new_flashnode		= qla4xxx_sysfs_ddb_add,
	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
	.login_flashnode	= qla4xxx_sysfs_ddb_login,
	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
};
266
267 static struct scsi_transport_template *qla4xxx_scsi_transport;
268
/**
 * qla4xxx_send_ping - issue a ping IOCB to the given destination
 * @shost: Scsi_Host of the adapter
 * @iface_num: interface number (only 0 and 1 are supported)
 * @iface_type: ISCSI_IFACE_TYPE_IPV4 or ISCSI_IFACE_TYPE_IPV6
 * @payload_size: ping payload size passed through to the IOCB
 * @pid: opaque id passed through to the ping IOCB
 * @dst_addr: destination sockaddr; family must match @iface_type
 *
 * For IPv6 the ping is first attempted from the link-local address; if
 * that fails, it is retried from the routable address of the interface.
 * Returns 0 on success, -EINVAL on IOCB failure, -ENOSYS for an
 * unsupported interface/address-family combination.
 */
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	uint32_t options = 0;
	uint8_t ipaddr[IPv6_ADDR_LEN];
	int rval;

	memset(ipaddr, 0, IPv6_ADDR_LEN);
	/* IPv4 to IPv4 */
	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
	    (dst_addr->sa_family == AF_INET)) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
				  "dest: %pI4\n", __func__,
				  &ha->ip_config.ip_address, ipaddr));
		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
					 ipaddr);
		if (rval)
			rval = -EINVAL;
	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
		   (dst_addr->sa_family == AF_INET6)) {
		/* IPv6 to IPv6 */
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

		options |= PING_IPV6_PROTOCOL_ENABLE;

		/* Ping using LinkLocal address */
		if ((iface_num == 0) || (iface_num == 1)) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
					  "src: %pI6 dest: %pI6\n", __func__,
					  &ha->ip_config.ipv6_link_local_addr,
					  ipaddr));
			options |= PING_IPV6_LINKLOCAL_ADDR;
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
		} else {
			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
				   "not supported\n", __func__, iface_num);
			rval = -ENOSYS;
			goto exit_send_ping;
		}

		/*
		 * If ping using LinkLocal address fails, try ping using
		 * IPv6 address
		 */
		if (rval != QLA_SUCCESS) {
			/* Retry from the routable IPv6 address: clear the
			 * link-local flag, keep PING_IPV6_PROTOCOL_ENABLE. */
			options &= ~PING_IPV6_LINKLOCAL_ADDR;
			if (iface_num == 0) {
				options |= PING_IPV6_ADDR0;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr0,
						  ipaddr));
			} else if (iface_num == 1) {
				options |= PING_IPV6_ADDR1;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr1,
						  ipaddr));
			}
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
			if (rval)
				rval = -EINVAL;
		}
	} else
		rval = -ENOSYS;
exit_send_ping:
	return rval;
}
348
/*
 * Tell the iSCSI transport class which sysfs attributes this driver
 * supports.  Every attribute listed below is exposed read-only
 * (S_IRUGO); anything not listed is hidden (return 0).
 */
static umode_t qla4_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		/* Per-host attributes. */
		switch (param) {
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
		case ISCSI_HOST_PARAM_PORT_STATE:
		case ISCSI_HOST_PARAM_PORT_SPEED:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		/* Per-session / per-connection attributes. */
		switch (param) {
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_TARGET_ALIAS:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_CHAP_OUT_IDX:
		case ISCSI_PARAM_CHAP_IN_IDX:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
		case ISCSI_PARAM_DISCOVERY_SESS:
		case ISCSI_PARAM_PORTAL_TYPE:
		case ISCSI_PARAM_CHAP_AUTH_EN:
		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
		case ISCSI_PARAM_BIDI_CHAP_EN:
		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_PARAM_DEF_TIME2WAIT:
		case ISCSI_PARAM_DEF_TIME2RETAIN:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
		case ISCSI_PARAM_TCP_WSF_DISABLE:
		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
		case ISCSI_PARAM_TCP_TIMER_SCALE:
		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
		case ISCSI_PARAM_TCP_XMIT_WSF:
		case ISCSI_PARAM_TCP_RECV_WSF:
		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
		case ISCSI_PARAM_IPV4_TOS:
		case ISCSI_PARAM_IPV6_TC:
		case ISCSI_PARAM_IPV6_FLOW_LABEL:
		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
		case ISCSI_PARAM_KEEPALIVE_TMO:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_ISID:
		case ISCSI_PARAM_TSID:
		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_STATSN:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_NET_PARAM:
		/* Network interface attributes (see qla4xxx_get_iface_param). */
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		case ISCSI_NET_PARAM_IPV6_ADDR:
		case ISCSI_NET_PARAM_IPV6_ROUTER:
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		case ISCSI_NET_PARAM_VLAN_ID:
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
		case ISCSI_NET_PARAM_VLAN_ENABLED:
		case ISCSI_NET_PARAM_MTU:
		case ISCSI_NET_PARAM_PORT:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_FLASHNODE_PARAM:
		/* Flash DDB (persistent target entry) attributes. */
		switch (param) {
		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
		case ISCSI_FLASHNODE_PORTAL_TYPE:
		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
		case ISCSI_FLASHNODE_DISCOVERY_SESS:
		case ISCSI_FLASHNODE_ENTRY_EN:
		case ISCSI_FLASHNODE_HDR_DGST_EN:
		case ISCSI_FLASHNODE_DATA_DGST_EN:
		case ISCSI_FLASHNODE_IMM_DATA_EN:
		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
		case ISCSI_FLASHNODE_DATASEQ_INORDER:
		case ISCSI_FLASHNODE_PDU_INORDER:
		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		case ISCSI_FLASHNODE_SNACK_REQ_EN:
		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_FLASHNODE_ERL:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		case ISCSI_FLASHNODE_FIRST_BURST:
		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		case ISCSI_FLASHNODE_MAX_R2T:
		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		case ISCSI_FLASHNODE_ISID:
		case ISCSI_FLASHNODE_TSID:
		case ISCSI_FLASHNODE_PORT:
		case ISCSI_FLASHNODE_MAX_BURST:
		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_ALIAS:
		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		case ISCSI_FLASHNODE_LOCAL_PORT:
		case ISCSI_FLASHNODE_IPV4_TOS:
		case ISCSI_FLASHNODE_IPV6_TC:
		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_TPGT:
		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		case ISCSI_FLASHNODE_TCP_RECV_WSF:
		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		case ISCSI_FLASHNODE_USERNAME:
		case ISCSI_FLASHNODE_PASSWORD:
		case ISCSI_FLASHNODE_STATSN:
		case ISCSI_FLASHNODE_EXP_STATSN:
		case ISCSI_FLASHNODE_IS_BOOT_TGT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
513
514 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
515 int16_t chap_index,
516 struct ql4_chap_table **chap_entry)
517 {
518 int rval = QLA_ERROR;
519 int max_chap_entries;
520
521 if (!ha->chap_list) {
522 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
523 rval = QLA_ERROR;
524 goto exit_get_chap;
525 }
526
527 if (is_qla80XX(ha))
528 max_chap_entries = (ha->hw.flt_chap_size / 2) /
529 sizeof(struct ql4_chap_table);
530 else
531 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
532
533 if (chap_index > max_chap_entries) {
534 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
535 rval = QLA_ERROR;
536 goto exit_get_chap;
537 }
538
539 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
540 if ((*chap_entry)->cookie !=
541 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
542 rval = QLA_ERROR;
543 *chap_entry = NULL;
544 } else {
545 rval = QLA_SUCCESS;
546 }
547
548 exit_get_chap:
549 return rval;
550 }
551
552 /**
553 * qla4xxx_find_free_chap_index - Find the first free chap index
554 * @ha: pointer to adapter structure
555 * @chap_index: CHAP index to be returned
556 *
557 * Find the first free chap index available in the chap table
558 *
559 * Note: Caller should acquire the chap lock before getting here.
560 **/
561 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
562 uint16_t *chap_index)
563 {
564 int i, rval;
565 int free_index = -1;
566 int max_chap_entries = 0;
567 struct ql4_chap_table *chap_table;
568
569 if (is_qla80XX(ha))
570 max_chap_entries = (ha->hw.flt_chap_size / 2) /
571 sizeof(struct ql4_chap_table);
572 else
573 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
574
575 if (!ha->chap_list) {
576 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
577 rval = QLA_ERROR;
578 goto exit_find_chap;
579 }
580
581 for (i = 0; i < max_chap_entries; i++) {
582 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
583
584 if ((chap_table->cookie !=
585 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
586 (i > MAX_RESRV_CHAP_IDX)) {
587 free_index = i;
588 break;
589 }
590 }
591
592 if (free_index != -1) {
593 *chap_index = free_index;
594 rval = QLA_SUCCESS;
595 } else {
596 rval = QLA_ERROR;
597 }
598
599 exit_find_chap:
600 return rval;
601 }
602
603 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
604 uint32_t *num_entries, char *buf)
605 {
606 struct scsi_qla_host *ha = to_qla_host(shost);
607 struct ql4_chap_table *chap_table;
608 struct iscsi_chap_rec *chap_rec;
609 int max_chap_entries = 0;
610 int valid_chap_entries = 0;
611 int ret = 0, i;
612
613 if (is_qla80XX(ha))
614 max_chap_entries = (ha->hw.flt_chap_size / 2) /
615 sizeof(struct ql4_chap_table);
616 else
617 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
618
619 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
620 __func__, *num_entries, chap_tbl_idx);
621
622 if (!buf) {
623 ret = -ENOMEM;
624 goto exit_get_chap_list;
625 }
626
627 chap_rec = (struct iscsi_chap_rec *) buf;
628 mutex_lock(&ha->chap_sem);
629 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
630 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
631 if (chap_table->cookie !=
632 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
633 continue;
634
635 chap_rec->chap_tbl_idx = i;
636 strncpy(chap_rec->username, chap_table->name,
637 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
638 strncpy(chap_rec->password, chap_table->secret,
639 QL4_CHAP_MAX_SECRET_LEN);
640 chap_rec->password_length = chap_table->secret_len;
641
642 if (chap_table->flags & BIT_7) /* local */
643 chap_rec->chap_type = CHAP_TYPE_OUT;
644
645 if (chap_table->flags & BIT_6) /* peer */
646 chap_rec->chap_type = CHAP_TYPE_IN;
647
648 chap_rec++;
649
650 valid_chap_entries++;
651 if (valid_chap_entries == *num_entries)
652 break;
653 else
654 continue;
655 }
656 mutex_unlock(&ha->chap_sem);
657
658 exit_get_chap_list:
659 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
660 __func__, valid_chap_entries);
661 *num_entries = valid_chap_entries;
662 return ret;
663 }
664
665 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
666 {
667 int ret = 0;
668 uint16_t *chap_tbl_idx = (uint16_t *) data;
669 struct iscsi_cls_session *cls_session;
670 struct iscsi_session *sess;
671 struct ddb_entry *ddb_entry;
672
673 if (!iscsi_is_session_dev(dev))
674 goto exit_is_chap_active;
675
676 cls_session = iscsi_dev_to_session(dev);
677 sess = cls_session->dd_data;
678 ddb_entry = sess->dd_data;
679
680 if (iscsi_session_chkready(cls_session))
681 goto exit_is_chap_active;
682
683 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
684 ret = 1;
685
686 exit_is_chap_active:
687 return ret;
688 }
689
690 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
691 uint16_t chap_tbl_idx)
692 {
693 int ret = 0;
694
695 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
696 __qla4xxx_is_chap_active);
697
698 return ret;
699 }
700
701 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
702 {
703 struct scsi_qla_host *ha = to_qla_host(shost);
704 struct ql4_chap_table *chap_table;
705 dma_addr_t chap_dma;
706 int max_chap_entries = 0;
707 uint32_t offset = 0;
708 uint32_t chap_size;
709 int ret = 0;
710
711 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
712 if (chap_table == NULL)
713 return -ENOMEM;
714
715 memset(chap_table, 0, sizeof(struct ql4_chap_table));
716
717 if (is_qla80XX(ha))
718 max_chap_entries = (ha->hw.flt_chap_size / 2) /
719 sizeof(struct ql4_chap_table);
720 else
721 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
722
723 if (chap_tbl_idx > max_chap_entries) {
724 ret = -EINVAL;
725 goto exit_delete_chap;
726 }
727
728 /* Check if chap index is in use.
729 * If chap is in use don't delet chap entry */
730 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
731 if (ret) {
732 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
733 "delete from flash\n", chap_tbl_idx);
734 ret = -EBUSY;
735 goto exit_delete_chap;
736 }
737
738 chap_size = sizeof(struct ql4_chap_table);
739 if (is_qla40XX(ha))
740 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
741 else {
742 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
743 /* flt_chap_size is CHAP table size for both ports
744 * so divide it by 2 to calculate the offset for second port
745 */
746 if (ha->port_num == 1)
747 offset += (ha->hw.flt_chap_size / 2);
748 offset += (chap_tbl_idx * chap_size);
749 }
750
751 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
752 if (ret != QLA_SUCCESS) {
753 ret = -EINVAL;
754 goto exit_delete_chap;
755 }
756
757 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
758 __le16_to_cpu(chap_table->cookie)));
759
760 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
761 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
762 goto exit_delete_chap;
763 }
764
765 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
766
767 offset = FLASH_CHAP_OFFSET |
768 (chap_tbl_idx * sizeof(struct ql4_chap_table));
769 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
770 FLASH_OPT_RMW_COMMIT);
771 if (ret == QLA_SUCCESS && ha->chap_list) {
772 mutex_lock(&ha->chap_sem);
773 /* Update ha chap_list cache */
774 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
775 chap_table, sizeof(struct ql4_chap_table));
776 mutex_unlock(&ha->chap_sem);
777 }
778 if (ret != QLA_SUCCESS)
779 ret = -EINVAL;
780
781 exit_delete_chap:
782 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
783 return ret;
784 }
785
786 /**
787 * qla4xxx_set_chap_entry - Make chap entry with given information
788 * @shost: pointer to host
789 * @data: chap info - credentials, index and type to make chap entry
790 * @len: length of data
791 *
792 * Add or update chap entry with the given information
793 **/
794 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
795 {
796 struct scsi_qla_host *ha = to_qla_host(shost);
797 struct iscsi_chap_rec chap_rec;
798 struct ql4_chap_table *chap_entry = NULL;
799 struct iscsi_param_info *param_info;
800 struct nlattr *attr;
801 int max_chap_entries = 0;
802 int type;
803 int rem = len;
804 int rc = 0;
805
806 memset(&chap_rec, 0, sizeof(chap_rec));
807
808 nla_for_each_attr(attr, data, len, rem) {
809 param_info = nla_data(attr);
810
811 switch (param_info->param) {
812 case ISCSI_CHAP_PARAM_INDEX:
813 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
814 break;
815 case ISCSI_CHAP_PARAM_CHAP_TYPE:
816 chap_rec.chap_type = param_info->value[0];
817 break;
818 case ISCSI_CHAP_PARAM_USERNAME:
819 memcpy(chap_rec.username, param_info->value,
820 param_info->len);
821 break;
822 case ISCSI_CHAP_PARAM_PASSWORD:
823 memcpy(chap_rec.password, param_info->value,
824 param_info->len);
825 break;
826 case ISCSI_CHAP_PARAM_PASSWORD_LEN:
827 chap_rec.password_length = param_info->value[0];
828 break;
829 default:
830 ql4_printk(KERN_ERR, ha,
831 "%s: No such sysfs attribute\n", __func__);
832 rc = -ENOSYS;
833 goto exit_set_chap;
834 };
835 }
836
837 if (chap_rec.chap_type == CHAP_TYPE_IN)
838 type = BIDI_CHAP;
839 else
840 type = LOCAL_CHAP;
841
842 if (is_qla80XX(ha))
843 max_chap_entries = (ha->hw.flt_chap_size / 2) /
844 sizeof(struct ql4_chap_table);
845 else
846 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
847
848 mutex_lock(&ha->chap_sem);
849 if (chap_rec.chap_tbl_idx < max_chap_entries) {
850 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
851 &chap_entry);
852 if (!rc) {
853 if (!(type == qla4xxx_get_chap_type(chap_entry))) {
854 ql4_printk(KERN_INFO, ha,
855 "Type mismatch for CHAP entry %d\n",
856 chap_rec.chap_tbl_idx);
857 rc = -EINVAL;
858 goto exit_unlock_chap;
859 }
860
861 /* If chap index is in use then don't modify it */
862 rc = qla4xxx_is_chap_active(shost,
863 chap_rec.chap_tbl_idx);
864 if (rc) {
865 ql4_printk(KERN_INFO, ha,
866 "CHAP entry %d is in use\n",
867 chap_rec.chap_tbl_idx);
868 rc = -EBUSY;
869 goto exit_unlock_chap;
870 }
871 }
872 } else {
873 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
874 if (rc) {
875 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
876 rc = -EBUSY;
877 goto exit_unlock_chap;
878 }
879 }
880
881 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
882 chap_rec.chap_tbl_idx, type);
883
884 exit_unlock_chap:
885 mutex_unlock(&ha->chap_sem);
886
887 exit_set_chap:
888 return rc;
889 }
890
/*
 * Format a network interface parameter as text into @buf for the iSCSI
 * transport sysfs files.  Only ISCSI_NET_PARAM requests are handled;
 * anything else yields -ENOSYS.  Returns the number of bytes written
 * or a negative errno.
 */
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf)
{
	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len = -ENOSYS;

	if (param_type != ISCSI_NET_PARAM)
		return -ENOSYS;

	switch (param) {
	case ISCSI_NET_PARAM_IPV4_ADDR:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
		break;
	case ISCSI_NET_PARAM_IPV4_GW:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		/* Protocol enable bit lives in a different option word for
		 * IPv4 vs IPv6. */
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv4_options &
				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
				      "enabled" : "disabled");
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv6_options &
				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
				      "enabled" : "disabled");
		break;
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		len = sprintf(buf, "%s\n",
			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
			      "dhcp" : "static");
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR:
		/* Two routable IPv6 addresses, selected by iface_num. */
		if (iface->iface_num == 0)
			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
		if (iface->iface_num == 1)
			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		len = sprintf(buf, "%pI6\n",
			      &ha->ip_config.ipv6_link_local_addr);
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER:
		len = sprintf(buf, "%pI6\n",
			      &ha->ip_config.ipv6_default_router_addr);
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		len = sprintf(buf, "%s\n",
			      (ha->ip_config.ipv6_addl_options &
			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
			      "nd" : "static");
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		len = sprintf(buf, "%s\n",
			      (ha->ip_config.ipv6_addl_options &
			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
			      "auto" : "static");
		break;
	case ISCSI_NET_PARAM_VLAN_ID:
		/* Low bits of the VLAN tag hold the id. */
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.ipv4_vlan_tag &
				       ISCSI_MAX_VLAN_ID));
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.ipv6_vlan_tag &
				       ISCSI_MAX_VLAN_ID));
		break;
	case ISCSI_NET_PARAM_VLAN_PRIORITY:
		/* Priority is the top 3 bits (802.1Q PCP field position). */
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%d\n",
				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY));
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%d\n",
				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY));
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv4_options &
				       IPOPT_VLAN_TAGGING_ENABLE) ?
				      "enabled" : "disabled");
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv6_options &
				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
				      "enabled" : "disabled");
		break;
	case ISCSI_NET_PARAM_MTU:
		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
		break;
	case ISCSI_NET_PARAM_PORT:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
		break;
	default:
		len = -ENOSYS;
	}

	return len;
}
1002
1003 static struct iscsi_endpoint *
1004 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1005 int non_blocking)
1006 {
1007 int ret;
1008 struct iscsi_endpoint *ep;
1009 struct qla_endpoint *qla_ep;
1010 struct scsi_qla_host *ha;
1011 struct sockaddr_in *addr;
1012 struct sockaddr_in6 *addr6;
1013
1014 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1015 if (!shost) {
1016 ret = -ENXIO;
1017 printk(KERN_ERR "%s: shost is NULL\n",
1018 __func__);
1019 return ERR_PTR(ret);
1020 }
1021
1022 ha = iscsi_host_priv(shost);
1023
1024 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
1025 if (!ep) {
1026 ret = -ENOMEM;
1027 return ERR_PTR(ret);
1028 }
1029
1030 qla_ep = ep->dd_data;
1031 memset(qla_ep, 0, sizeof(struct qla_endpoint));
1032 if (dst_addr->sa_family == AF_INET) {
1033 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
1034 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
1035 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
1036 (char *)&addr->sin_addr));
1037 } else if (dst_addr->sa_family == AF_INET6) {
1038 memcpy(&qla_ep->dst_addr, dst_addr,
1039 sizeof(struct sockaddr_in6));
1040 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
1041 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
1042 (char *)&addr6->sin6_addr));
1043 }
1044
1045 qla_ep->host = shost;
1046
1047 return ep;
1048 }
1049
1050 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1051 {
1052 struct qla_endpoint *qla_ep;
1053 struct scsi_qla_host *ha;
1054 int ret = 0;
1055
1056 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1057 qla_ep = ep->dd_data;
1058 ha = to_qla_host(qla_ep->host);
1059
1060 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
1061 ret = 1;
1062
1063 return ret;
1064 }
1065
/* Release an endpoint previously allocated by qla4xxx_ep_connect(). */
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	iscsi_destroy_endpoint(ep);
}
1071
1072 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
1073 enum iscsi_param param,
1074 char *buf)
1075 {
1076 struct qla_endpoint *qla_ep = ep->dd_data;
1077 struct sockaddr *dst_addr;
1078
1079 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1080
1081 switch (param) {
1082 case ISCSI_PARAM_CONN_PORT:
1083 case ISCSI_PARAM_CONN_ADDRESS:
1084 if (!qla_ep)
1085 return -ENOTCONN;
1086
1087 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1088 if (!dst_addr)
1089 return -ENOTCONN;
1090
1091 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1092 &qla_ep->dst_addr, param, buf);
1093 default:
1094 return -ENOSYS;
1095 }
1096 }
1097
/**
 * qla4xxx_conn_get_stats - fetch per-connection iSCSI statistics from firmware
 * @cls_conn: connection whose statistics are requested
 * @stats: transport-layer statistics structure to fill
 *
 * Allocates a DMA buffer, asks the firmware for the DDB's management data,
 * and translates the little-endian firmware counters into @stats.  On any
 * failure the function returns silently with @stats unmodified.
 */
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats)
{
	struct iscsi_session *sess;
	struct iscsi_cls_session *cls_sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct ql_iscsi_stats *ql_iscsi_stats;
	int stats_size;
	int ret;
	dma_addr_t iscsi_stats_dma;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

	cls_sess = iscsi_conn_to_session(cls_conn);
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	/* Firmware DMA target must be page aligned in size. */
	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
	/* Allocate memory */
	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		goto exit_get_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		goto free_stats;
	}

	/* Firmware counters are little-endian; convert before reporting. */
	/* octets */
	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
	/* xmit pdus */
	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
	/* recv pdus */
	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
	stats->logoutrsp_pdus =
			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);

free_stats:
	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
			  iscsi_stats_dma);
exit_get_stats:
	return;
}
1165
1166 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
1167 {
1168 struct iscsi_cls_session *session;
1169 struct iscsi_session *sess;
1170 unsigned long flags;
1171 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
1172
1173 session = starget_to_session(scsi_target(sc->device));
1174 sess = session->dd_data;
1175
1176 spin_lock_irqsave(&session->lock, flags);
1177 if (session->state == ISCSI_SESSION_FAILED)
1178 ret = BLK_EH_RESET_TIMER;
1179 spin_unlock_irqrestore(&session->lock, flags);
1180
1181 return ret;
1182 }
1183
1184 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
1185 {
1186 struct scsi_qla_host *ha = to_qla_host(shost);
1187 struct iscsi_cls_host *ihost = shost->shost_data;
1188 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
1189
1190 qla4xxx_get_firmware_state(ha);
1191
1192 switch (ha->addl_fw_state & 0x0F00) {
1193 case FW_ADDSTATE_LINK_SPEED_10MBPS:
1194 speed = ISCSI_PORT_SPEED_10MBPS;
1195 break;
1196 case FW_ADDSTATE_LINK_SPEED_100MBPS:
1197 speed = ISCSI_PORT_SPEED_100MBPS;
1198 break;
1199 case FW_ADDSTATE_LINK_SPEED_1GBPS:
1200 speed = ISCSI_PORT_SPEED_1GBPS;
1201 break;
1202 case FW_ADDSTATE_LINK_SPEED_10GBPS:
1203 speed = ISCSI_PORT_SPEED_10GBPS;
1204 break;
1205 }
1206 ihost->port_speed = speed;
1207 }
1208
1209 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1210 {
1211 struct scsi_qla_host *ha = to_qla_host(shost);
1212 struct iscsi_cls_host *ihost = shost->shost_data;
1213 uint32_t state = ISCSI_PORT_STATE_DOWN;
1214
1215 if (test_bit(AF_LINK_UP, &ha->flags))
1216 state = ISCSI_PORT_STATE_UP;
1217
1218 ihost->port_state = state;
1219 }
1220
/**
 * qla4xxx_host_get_param - format a host-level iSCSI attribute into @buf
 * @shost: SCSI host to query
 * @param: which host parameter is requested
 * @buf: output buffer (sysfs page)
 *
 * Returns the number of bytes written, or -ENOSYS for parameters this
 * driver does not export.  Port state/speed are refreshed from the
 * adapter before being formatted.
 */
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sprintf(buf, "%s\n", ha->name_string);
		break;
	case ISCSI_HOST_PARAM_PORT_STATE:
		/* Refresh cached state from the AF_LINK_UP flag first. */
		qla4xxx_set_port_state(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
		break;
	case ISCSI_HOST_PARAM_PORT_SPEED:
		/* Re-query firmware so the reported speed is current. */
		qla4xxx_set_port_speed(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
		break;
	default:
		return -ENOSYS;
	}

	return len;
}
1251
1252 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1253 {
1254 if (ha->iface_ipv4)
1255 return;
1256
1257 /* IPv4 */
1258 ha->iface_ipv4 = iscsi_create_iface(ha->host,
1259 &qla4xxx_iscsi_transport,
1260 ISCSI_IFACE_TYPE_IPV4, 0, 0);
1261 if (!ha->iface_ipv4)
1262 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1263 "iface0.\n");
1264 }
1265
1266 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1267 {
1268 if (!ha->iface_ipv6_0)
1269 /* IPv6 iface-0 */
1270 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1271 &qla4xxx_iscsi_transport,
1272 ISCSI_IFACE_TYPE_IPV6, 0,
1273 0);
1274 if (!ha->iface_ipv6_0)
1275 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1276 "iface0.\n");
1277
1278 if (!ha->iface_ipv6_1)
1279 /* IPv6 iface-1 */
1280 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1281 &qla4xxx_iscsi_transport,
1282 ISCSI_IFACE_TYPE_IPV6, 1,
1283 0);
1284 if (!ha->iface_ipv6_1)
1285 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1286 "iface1.\n");
1287 }
1288
1289 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1290 {
1291 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1292 qla4xxx_create_ipv4_iface(ha);
1293
1294 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1295 qla4xxx_create_ipv6_iface(ha);
1296 }
1297
1298 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1299 {
1300 if (ha->iface_ipv4) {
1301 iscsi_destroy_iface(ha->iface_ipv4);
1302 ha->iface_ipv4 = NULL;
1303 }
1304 }
1305
1306 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1307 {
1308 if (ha->iface_ipv6_0) {
1309 iscsi_destroy_iface(ha->iface_ipv6_0);
1310 ha->iface_ipv6_0 = NULL;
1311 }
1312 if (ha->iface_ipv6_1) {
1313 iscsi_destroy_iface(ha->iface_ipv6_1);
1314 ha->iface_ipv6_1 = NULL;
1315 }
1316 }
1317
/* Tear down all transport ifaces (IPv4 and both IPv6) for this host. */
static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
	qla4xxx_destroy_ipv4_iface(ha);
	qla4xxx_destroy_ipv6_iface(ha);
}
1323
/**
 * qla4xxx_set_ipv6 - apply one IPv6 iface parameter to the init control block
 * @ha: adapter the parameter belongs to
 * @iface_param: netlink-supplied parameter (type, iface_num, value)
 * @init_fw_cb: firmware address control block being edited in place
 *
 * The edited block is later flashed and activated by the caller
 * (qla4xxx_iface_set_param); this function only mutates @init_fw_cb.
 * Multi-byte firmware fields are little-endian except the VLAN tag,
 * which is stored big-endian here (matching the IPv4 path).
 */
static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	/*
	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
	 * iface_num 1 is valid only for IPv6 Addr.
	 */
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV6_ADDR:
		if (iface_param->iface_num & 0x1)
			/* IPv6 Addr 1 */
			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
			       sizeof(init_fw_cb->ipv6_addr1));
		else
			/* IPv6 Addr 0 */
			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
			       sizeof(init_fw_cb->ipv6_addr0));
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		if (iface_param->iface_num & 0x1)
			break;
		/* Only the interface-ID half (bytes 8..15) is stored;
		 * firmware derives the fe80:: prefix itself. */
		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
		       sizeof(init_fw_cb->ipv6_if_id));
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
			init_fw_cb->ipv6_addtl_opts &=
				cpu_to_le16(
				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
			init_fw_cb->ipv6_addtl_opts |=
				cpu_to_le16(
				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
		else
			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
				   "IPv6 addr\n");
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] ==
		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
		else if (iface_param->value[0] ==
			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
		else
			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
				   "IPv6 linklocal addr\n");
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		/* Autoconfigured router: clear the static address so
		 * firmware discovers the default router itself. */
		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		/* Keep the transport iface objects in sync with the
		 * protocol-enable bit. */
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
			qla4xxx_create_ipv6_iface(ha);
		} else {
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
					    0xFFFF);
			qla4xxx_destroy_ipv6_iface(ha);
		}
		break;
	case ISCSI_NET_PARAM_VLAN_TAG:
		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
			break;
		/* NOTE(review): VLAN tag is stored big-endian unlike the
		 * other 16-bit fields here — appears intentional (IPv4 path
		 * does the same); confirm against firmware spec. */
		init_fw_cb->ipv6_vlan_tag =
				cpu_to_be16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
		else
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
		break;
	case ISCSI_NET_PARAM_MTU:
		/* MTU is shared by IPv4 and IPv6 (one ethernet port). */
		init_fw_cb->eth_mtu_size =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_PORT:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		init_fw_cb->ipv6_port =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
			   iface_param->param);
		break;
	}
}
1442
/**
 * qla4xxx_set_ipv4 - apply one IPv4 iface parameter to the init control block
 * @ha: adapter the parameter belongs to
 * @iface_param: netlink-supplied parameter (type, value)
 * @init_fw_cb: firmware address control block being edited in place
 *
 * Companion to qla4xxx_set_ipv6(); only @init_fw_cb is mutated, the caller
 * flashes and activates the block afterwards.
 */
static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV4_ADDR:
		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv4_addr));
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
		memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
		       sizeof(init_fw_cb->ipv4_subnet));
		break;
	case ISCSI_NET_PARAM_IPV4_GW:
		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv4_gw_addr));
		break;
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		/* DHCP vs static is a single bit in the TCP options field. */
		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
			init_fw_cb->ipv4_tcp_opts |=
					cpu_to_le16(TCPOPT_DHCP_ENABLE);
		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
			init_fw_cb->ipv4_tcp_opts &=
					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
		else
			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		/* Keep the transport iface object in sync with the
		 * protocol-enable bit. */
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
			qla4xxx_create_ipv4_iface(ha);
		} else {
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
					    0xFFFF);
			qla4xxx_destroy_ipv4_iface(ha);
		}
		break;
	case ISCSI_NET_PARAM_VLAN_TAG:
		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
			break;
		/* NOTE(review): stored big-endian unlike the other 16-bit
		 * fields here — matches the IPv6 path; confirm against
		 * firmware spec. */
		init_fw_cb->ipv4_vlan_tag =
				cpu_to_be16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
		break;
	case ISCSI_NET_PARAM_MTU:
		/* MTU is shared by IPv4 and IPv6 (one ethernet port). */
		init_fw_cb->eth_mtu_size =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_PORT:
		init_fw_cb->ipv4_port =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
			   iface_param->param);
		break;
	}
}
1510
/*
 * qla4xxx_initcb_to_acb - prepare an init control block for SET_ACB
 *
 * Reinterprets the init-fw control block as the ACB-definition layout and
 * zeroes every reserved region so no stale init-cb bytes leak into fields
 * the SET_ACB mailbox command treats as reserved.
 */
static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
	struct addr_ctrl_blk_def *acb;
	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
	memset(acb->reserved1, 0, sizeof(acb->reserved1));
	memset(acb->reserved2, 0, sizeof(acb->reserved2));
	memset(acb->reserved3, 0, sizeof(acb->reserved3));
	memset(acb->reserved4, 0, sizeof(acb->reserved4));
	memset(acb->reserved5, 0, sizeof(acb->reserved5));
	memset(acb->reserved6, 0, sizeof(acb->reserved6));
	memset(acb->reserved7, 0, sizeof(acb->reserved7));
	memset(acb->reserved8, 0, sizeof(acb->reserved8));
	memset(acb->reserved9, 0, sizeof(acb->reserved9));
	memset(acb->reserved10, 0, sizeof(acb->reserved10));
	memset(acb->reserved11, 0, sizeof(acb->reserved11));
	memset(acb->reserved12, 0, sizeof(acb->reserved12));
	memset(acb->reserved13, 0, sizeof(acb->reserved13));
	memset(acb->reserved14, 0, sizeof(acb->reserved14));
	memset(acb->reserved15, 0, sizeof(acb->reserved15));
}
1532
/**
 * qla4xxx_iface_set_param - apply a batch of iface parameters from netlink
 * @shost: SCSI host whose interfaces are being configured
 * @data: netlink attribute stream of iscsi_iface_param_info entries
 * @len: length of @data in bytes
 *
 * Sequence: read current init-fw control block -> patch it per attribute
 * (qla4xxx_set_ipv4/qla4xxx_set_ipv6) -> commit to flash -> disable the
 * current ACB -> convert and re-apply via SET_ACB -> refresh the driver's
 * cached ifcb.  Returns 0 on success or a negative errno.
 */
static int
qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int rval = 0;
	struct iscsi_iface_param_info *iface_param = NULL;
	struct addr_ctrl_blk *init_fw_cb = NULL;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t rem = len;
	struct nlattr *attr;

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct addr_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (!init_fw_cb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
			   __func__);
		return -ENOMEM;
	}

	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	/* Start from the firmware's current ifcb so untouched fields
	 * keep their existing values. */
	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	/* Patch the block in place, one netlink attribute at a time. */
	nla_for_each_attr(attr, data, len, rem) {
		iface_param = nla_data(attr);

		if (iface_param->param_type != ISCSI_NET_PARAM)
			continue;

		switch (iface_param->iface_type) {
		case ISCSI_IFACE_TYPE_IPV4:
			switch (iface_param->iface_num) {
			case 0:
				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
				break;
			default:
				/* Cannot have more than one IPv4 interface */
				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
					   "number = %d\n",
					   iface_param->iface_num);
				break;
			}
			break;
		case ISCSI_IFACE_TYPE_IPV6:
			switch (iface_param->iface_num) {
			case 0:
			case 1:
				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
				break;
			default:
				/* Cannot have more than two IPv6 interface */
				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
					   "number = %d\n",
					   iface_param->iface_num);
				break;
			}
			break;
		default:
			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
			break;
		}
	}

	/* Magic cookie firmware expects in a valid ifcb. */
	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);

	/* Persist the modified block to flash (read-modify-write commit). */
	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
				 sizeof(struct addr_ctrl_blk),
				 FLASH_OPT_RMW_COMMIT);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	/* The old ACB must be disabled before the new one can be applied. */
	rval = qla4xxx_disable_acb(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	/* Wait (bounded) for the disable-ACB AEN before re-enabling. */
	wait_for_completion_timeout(&ha->disable_acb_comp,
				    DISABLE_ACB_TOV * HZ);

	qla4xxx_initcb_to_acb(init_fw_cb);

	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	/* Re-read the ifcb so the driver's cached copy matches firmware. */
	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
				  init_fw_cb_dma);

exit_init_fw_cb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);

	return rval;
}
1648
1649 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1650 enum iscsi_param param, char *buf)
1651 {
1652 struct iscsi_session *sess = cls_sess->dd_data;
1653 struct ddb_entry *ddb_entry = sess->dd_data;
1654 struct scsi_qla_host *ha = ddb_entry->ha;
1655 int rval, len;
1656 uint16_t idx;
1657
1658 switch (param) {
1659 case ISCSI_PARAM_CHAP_IN_IDX:
1660 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1661 sess->password_in, BIDI_CHAP,
1662 &idx);
1663 if (rval)
1664 len = sprintf(buf, "\n");
1665 else
1666 len = sprintf(buf, "%hu\n", idx);
1667 break;
1668 case ISCSI_PARAM_CHAP_OUT_IDX:
1669 rval = qla4xxx_get_chap_index(ha, sess->username,
1670 sess->password, LOCAL_CHAP,
1671 &idx);
1672 if (rval)
1673 len = sprintf(buf, "\n");
1674 else
1675 len = sprintf(buf, "%hu\n", idx);
1676 break;
1677 default:
1678 return iscsi_session_get_param(cls_sess, param, buf);
1679 }
1680
1681 return len;
1682 }
1683
1684 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1685 enum iscsi_param param, char *buf)
1686 {
1687 struct iscsi_conn *conn;
1688 struct qla_conn *qla_conn;
1689 struct sockaddr *dst_addr;
1690 int len = 0;
1691
1692 conn = cls_conn->dd_data;
1693 qla_conn = conn->dd_data;
1694 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1695
1696 switch (param) {
1697 case ISCSI_PARAM_CONN_PORT:
1698 case ISCSI_PARAM_CONN_ADDRESS:
1699 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1700 dst_addr, param, buf);
1701 default:
1702 return iscsi_conn_get_param(cls_conn, param, buf);
1703 }
1704
1705 return len;
1706
1707 }
1708
/**
 * qla4xxx_get_ddb_index - reserve a free firmware DDB index
 * @ha: adapter to allocate from
 * @ddb_index: out-parameter receiving the reserved index
 *
 * Scans the driver-side bitmap for a free slot, claims it atomically, and
 * asks firmware to reserve the corresponding DDB.  If firmware reports the
 * index is taken (MBOX_STS_COMMAND_ERROR) the search restarts at the next
 * free bit.  Returns QLA_SUCCESS or QLA_ERROR.
 *
 * NOTE(review): on the non-retryable firmware failure the claimed bit is
 * left set in ddb_idx_map and *ddb_index is still written — looks like the
 * bit should be cleared on that path; confirm against callers.
 */
int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
{
	uint32_t mbx_sts = 0;
	uint16_t tmp_ddb_index;
	int ret;

get_ddb_index:
	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free DDB index not available\n"));
		ret = QLA_ERROR;
		goto exit_get_ddb_index;
	}

	/* Another thread may have claimed the bit between find and set;
	 * if so, rescan. */
	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
		goto get_ddb_index;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Found a free DDB index at %d\n", tmp_ddb_index));
	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
	if (ret == QLA_ERROR) {
		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
			ql4_printk(KERN_INFO, ha,
				   "DDB index = %d not available trying next\n",
				   tmp_ddb_index);
			goto get_ddb_index;
		}
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free FW DDB not available\n"));
	}

	*ddb_index = tmp_ddb_index;

exit_get_ddb_index:
	return ret;
}
1747
1748 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1749 struct ddb_entry *ddb_entry,
1750 char *existing_ipaddr,
1751 char *user_ipaddr)
1752 {
1753 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1754 char formatted_ipaddr[DDB_IPADDR_LEN];
1755 int status = QLA_SUCCESS, ret = 0;
1756
1757 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1758 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1759 '\0', NULL);
1760 if (ret == 0) {
1761 status = QLA_ERROR;
1762 goto out_match;
1763 }
1764 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1765 } else {
1766 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1767 '\0', NULL);
1768 if (ret == 0) {
1769 status = QLA_ERROR;
1770 goto out_match;
1771 }
1772 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1773 }
1774
1775 if (strcmp(existing_ipaddr, formatted_ipaddr))
1776 status = QLA_ERROR;
1777
1778 out_match:
1779 return status;
1780 }
1781
/**
 * qla4xxx_match_fwdb_session - look for an existing flash-DDB session that
 * matches the connection being started
 * @ha: adapter to scan
 * @cls_conn: connection whose target IQN/address/port are compared
 *
 * Walks every firmware DDB of type FLASH_DDB and compares target name,
 * destination IP (canonicalized via qla4xxx_match_ipaddress) and port.
 * Returns QLA_SUCCESS if a matching session already exists, QLA_ERROR
 * otherwise.  Used to avoid a second login that would log out the
 * existing connection.
 */
static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
				      struct iscsi_cls_conn *cls_conn)
{
	int idx = 0, max_ddbs, rval;
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess, *existing_sess;
	struct iscsi_conn *conn, *existing_conn;
	struct ddb_entry *ddb_entry;

	sess = cls_sess->dd_data;
	conn = cls_conn->dd_data;

	/* Without a full target triple there is nothing to match against. */
	if (sess->targetname == NULL ||
	    conn->persistent_address == NULL ||
	    conn->persistent_port == 0)
		return QLA_ERROR;

	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				     MAX_DEV_DB_ENTRIES;

	for (idx = 0; idx < max_ddbs; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		if (ddb_entry->ddb_type != FLASH_DDB)
			continue;

		existing_sess = ddb_entry->sess->dd_data;
		existing_conn = ddb_entry->conn->dd_data;

		/* Skip flash entries that themselves lack a full triple. */
		if (existing_sess->targetname == NULL ||
		    existing_conn->persistent_address == NULL ||
		    existing_conn->persistent_port == 0)
			continue;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IQN = %s User IQN = %s\n",
				  existing_sess->targetname,
				  sess->targetname));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IP = %s User IP = %s\n",
				  existing_conn->persistent_address,
				  conn->persistent_address));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Port = %d User Port = %d\n",
				  existing_conn->persistent_port,
				  conn->persistent_port));

		if (strcmp(existing_sess->targetname, sess->targetname))
			continue;
		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
					existing_conn->persistent_address,
					conn->persistent_address);
		if (rval == QLA_ERROR)
			continue;
		if (existing_conn->persistent_port != conn->persistent_port)
			continue;
		/* All three matched; idx < max_ddbs marks success below. */
		break;
	}

	if (idx == max_ddbs)
		return QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Match found in fwdb sessions\n"));
	return QLA_SUCCESS;
}
1852
1853 static struct iscsi_cls_session *
1854 qla4xxx_session_create(struct iscsi_endpoint *ep,
1855 uint16_t cmds_max, uint16_t qdepth,
1856 uint32_t initial_cmdsn)
1857 {
1858 struct iscsi_cls_session *cls_sess;
1859 struct scsi_qla_host *ha;
1860 struct qla_endpoint *qla_ep;
1861 struct ddb_entry *ddb_entry;
1862 uint16_t ddb_index;
1863 struct iscsi_session *sess;
1864 struct sockaddr *dst_addr;
1865 int ret;
1866
1867 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1868 if (!ep) {
1869 printk(KERN_ERR "qla4xxx: missing ep.\n");
1870 return NULL;
1871 }
1872
1873 qla_ep = ep->dd_data;
1874 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1875 ha = to_qla_host(qla_ep->host);
1876
1877 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1878 if (ret == QLA_ERROR)
1879 return NULL;
1880
1881 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1882 cmds_max, sizeof(struct ddb_entry),
1883 sizeof(struct ql4_task_data),
1884 initial_cmdsn, ddb_index);
1885 if (!cls_sess)
1886 return NULL;
1887
1888 sess = cls_sess->dd_data;
1889 ddb_entry = sess->dd_data;
1890 ddb_entry->fw_ddb_index = ddb_index;
1891 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1892 ddb_entry->ha = ha;
1893 ddb_entry->sess = cls_sess;
1894 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1895 ddb_entry->ddb_change = qla4xxx_ddb_change;
1896 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1897 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1898 ha->tot_ddbs++;
1899
1900 return cls_sess;
1901 }
1902
/**
 * qla4xxx_session_destroy - tear down a session and free its firmware DDB
 * @cls_sess: session being destroyed
 *
 * Polls the firmware DDB state for up to LOGOUT_TOV seconds, waiting for
 * the logout to complete (no-connection or failed state), then clears the
 * DDB, frees the driver bookkeeping under the hardware lock, and tears
 * down the transport session.  The teardown proceeds even if the DMA
 * buffer cannot be allocated or the state poll errors out.
 */
static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	unsigned long flags, wtime;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t ddb_state;
	int ret;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto destroy_session;
	}

	/* Give the logout up to LOGOUT_TOV seconds to settle. */
	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto destroy_session;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto destroy_session;

		/* Re-poll once a second until the timeout expires. */
		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

destroy_session:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(cls_sess);

	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}
1956
1957 static struct iscsi_cls_conn *
1958 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1959 {
1960 struct iscsi_cls_conn *cls_conn;
1961 struct iscsi_session *sess;
1962 struct ddb_entry *ddb_entry;
1963
1964 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1965 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1966 conn_idx);
1967 if (!cls_conn)
1968 return NULL;
1969
1970 sess = cls_sess->dd_data;
1971 ddb_entry = sess->dd_data;
1972 ddb_entry->conn = cls_conn;
1973
1974 return cls_conn;
1975 }
1976
1977 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1978 struct iscsi_cls_conn *cls_conn,
1979 uint64_t transport_fd, int is_leading)
1980 {
1981 struct iscsi_conn *conn;
1982 struct qla_conn *qla_conn;
1983 struct iscsi_endpoint *ep;
1984
1985 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1986
1987 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1988 return -EINVAL;
1989 ep = iscsi_lookup_endpoint(transport_fd);
1990 conn = cls_conn->dd_data;
1991 qla_conn = conn->dd_data;
1992 qla_conn->qla_ep = ep->dd_data;
1993 return 0;
1994 }
1995
/**
 * qla4xxx_conn_start - program the firmware DDB and initiate login
 * @cls_conn: connection to start
 *
 * Refuses to start if an identical session already exists in the firmware
 * flash DDBs (a second login would log out the first).  Otherwise pushes
 * the connection parameters into the DDB and asks firmware to open the
 * connection.  Returns 0 on success or a negative errno.
 */
static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t mbx_sts = 0;
	int ret = 0;
	int status = QLA_SUCCESS;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	/* Check if we have  matching FW DDB, if yes then do not
	 * login to this target. This could cause target to logout previous
	 * connection
	 */
	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
	if (ret == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Session already exist in FW.\n");
		ret = -EEXIST;
		goto exit_conn_start;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_conn_start;
	}

	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
	if (ret) {
		/* If iscsid is stopped and started then no need to do
		* set param again since ddb state will be already
		* active and FW does not allow set ddb to an
		* active session.
		*/
		if (mbx_sts)
			if (ddb_entry->fw_ddb_device_state ==
						DDB_DS_SESSION_ACTIVE) {
				ddb_entry->unblock_sess(ddb_entry->sess);
				goto exit_set_param;
			}

		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
			   __func__, ddb_entry->fw_ddb_index);
		goto exit_conn_start;
	}

	/* Kick off the firmware login for this DDB. */
	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
	if (status == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
			   sess->targetname);
		ret = -EINVAL;
		goto exit_conn_start;
	}

	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
		      ddb_entry->fw_ddb_device_state));

exit_set_param:
	ret = 0;

exit_conn_start:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}
2076
2077 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
2078 {
2079 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2080 struct iscsi_session *sess;
2081 struct scsi_qla_host *ha;
2082 struct ddb_entry *ddb_entry;
2083 int options;
2084
2085 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
2086 sess = cls_sess->dd_data;
2087 ddb_entry = sess->dd_data;
2088 ha = ddb_entry->ha;
2089
2090 options = LOGOUT_OPTION_CLOSE_SESSION;
2091 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
2092 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
2093 }
2094
2095 static void qla4xxx_task_work(struct work_struct *wdata)
2096 {
2097 struct ql4_task_data *task_data;
2098 struct scsi_qla_host *ha;
2099 struct passthru_status *sts;
2100 struct iscsi_task *task;
2101 struct iscsi_hdr *hdr;
2102 uint8_t *data;
2103 uint32_t data_len;
2104 struct iscsi_conn *conn;
2105 int hdr_len;
2106 itt_t itt;
2107
2108 task_data = container_of(wdata, struct ql4_task_data, task_work);
2109 ha = task_data->ha;
2110 task = task_data->task;
2111 sts = &task_data->sts;
2112 hdr_len = sizeof(struct iscsi_hdr);
2113
2114 DEBUG3(printk(KERN_INFO "Status returned\n"));
2115 DEBUG3(qla4xxx_dump_buffer(sts, 64));
2116 DEBUG3(printk(KERN_INFO "Response buffer"));
2117 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
2118
2119 conn = task->conn;
2120
2121 switch (sts->completionStatus) {
2122 case PASSTHRU_STATUS_COMPLETE:
2123 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
2124 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
2125 itt = sts->handle;
2126 hdr->itt = itt;
2127 data = task_data->resp_buffer + hdr_len;
2128 data_len = task_data->resp_len - hdr_len;
2129 iscsi_complete_pdu(conn, hdr, data, data_len);
2130 break;
2131 default:
2132 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
2133 sts->completionStatus);
2134 break;
2135 }
2136 return;
2137 }
2138
2139 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2140 {
2141 struct ql4_task_data *task_data;
2142 struct iscsi_session *sess;
2143 struct ddb_entry *ddb_entry;
2144 struct scsi_qla_host *ha;
2145 int hdr_len;
2146
2147 sess = task->conn->session;
2148 ddb_entry = sess->dd_data;
2149 ha = ddb_entry->ha;
2150 task_data = task->dd_data;
2151 memset(task_data, 0, sizeof(struct ql4_task_data));
2152
2153 if (task->sc) {
2154 ql4_printk(KERN_INFO, ha,
2155 "%s: SCSI Commands not implemented\n", __func__);
2156 return -EINVAL;
2157 }
2158
2159 hdr_len = sizeof(struct iscsi_hdr);
2160 task_data->ha = ha;
2161 task_data->task = task;
2162
2163 if (task->data_count) {
2164 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
2165 task->data_count,
2166 PCI_DMA_TODEVICE);
2167 }
2168
2169 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
2170 __func__, task->conn->max_recv_dlength, hdr_len));
2171
2172 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
2173 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
2174 task_data->resp_len,
2175 &task_data->resp_dma,
2176 GFP_ATOMIC);
2177 if (!task_data->resp_buffer)
2178 goto exit_alloc_pdu;
2179
2180 task_data->req_len = task->data_count + hdr_len;
2181 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
2182 task_data->req_len,
2183 &task_data->req_dma,
2184 GFP_ATOMIC);
2185 if (!task_data->req_buffer)
2186 goto exit_alloc_pdu;
2187
2188 task->hdr = task_data->req_buffer;
2189
2190 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
2191
2192 return 0;
2193
2194 exit_alloc_pdu:
2195 if (task_data->resp_buffer)
2196 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2197 task_data->resp_buffer, task_data->resp_dma);
2198
2199 if (task_data->req_buffer)
2200 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2201 task_data->req_buffer, task_data->req_dma);
2202 return -ENOMEM;
2203 }
2204
2205 static void qla4xxx_task_cleanup(struct iscsi_task *task)
2206 {
2207 struct ql4_task_data *task_data;
2208 struct iscsi_session *sess;
2209 struct ddb_entry *ddb_entry;
2210 struct scsi_qla_host *ha;
2211 int hdr_len;
2212
2213 hdr_len = sizeof(struct iscsi_hdr);
2214 sess = task->conn->session;
2215 ddb_entry = sess->dd_data;
2216 ha = ddb_entry->ha;
2217 task_data = task->dd_data;
2218
2219 if (task->data_count) {
2220 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
2221 task->data_count, PCI_DMA_TODEVICE);
2222 }
2223
2224 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
2225 __func__, task->conn->max_recv_dlength, hdr_len));
2226
2227 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2228 task_data->resp_buffer, task_data->resp_dma);
2229 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2230 task_data->req_buffer, task_data->req_dma);
2231 return;
2232 }
2233
2234 static int qla4xxx_task_xmit(struct iscsi_task *task)
2235 {
2236 struct scsi_cmnd *sc = task->sc;
2237 struct iscsi_session *sess = task->conn->session;
2238 struct ddb_entry *ddb_entry = sess->dd_data;
2239 struct scsi_qla_host *ha = ddb_entry->ha;
2240
2241 if (!sc)
2242 return qla4xxx_send_passthru0(task);
2243
2244 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
2245 __func__);
2246 return -ENOSYS;
2247 }
2248
/*
 * qla4xxx_copy_from_fwddb_param - populate a flash session/connection from
 * a firmware DDB entry.
 *
 * Decodes the option bitmaps and copies the scalar parameters, addresses
 * and name strings out of @fw_ddb_entry into @sess / @conn.  String and
 * address fields are (re)allocated; returns 0 on success or -ENOMEM on
 * allocation failure.  This is the inverse of qla4xxx_copy_to_fwddb_param().
 */
static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
					 struct iscsi_bus_flash_conn *conn,
					 struct dev_db_entry *fw_ddb_entry)
{
	unsigned long options = 0;
	int rc = 0;

	/* General DDB options: address family selects the portal type. */
	options = le16_to_cpu(fw_ddb_entry->options);
	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
	if (test_bit(OPT_IPV6_DEVICE, &options)) {
		rc = iscsi_switch_str_param(&sess->portal_type,
					    PORTAL_TYPE_IPV6);
		if (rc)
			goto exit_copy;
	} else {
		rc = iscsi_switch_str_param(&sess->portal_type,
					    PORTAL_TYPE_IPV4);
		if (rc)
			goto exit_copy;
	}

	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
					      &options);
	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);

	/* iSCSI negotiation option bits. */
	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
					    &options);
	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
					     &options);
	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
	sess->discovery_auth_optional =
			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
	/* ERL is a two-bit field split across two option bits. */
	if (test_bit(ISCSIOPT_ERL1, &options))
		sess->erl |= BIT_1;
	if (test_bit(ISCSIOPT_ERL0, &options))
		sess->erl |= BIT_0;

	options = le16_to_cpu(fw_ddb_entry->tcp_options);
	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
		conn->tcp_timer_scale |= BIT_3;
	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
		conn->tcp_timer_scale |= BIT_2;
	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
		conn->tcp_timer_scale |= BIT_1;

	/* Scale bits were gathered in BIT_1..BIT_3; shift down to a
	 * 3-bit value (mirrors the encode in qla4xxx_copy_to_fwddb_param). */
	conn->tcp_timer_scale >>= 1;
	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);

	options = le16_to_cpu(fw_ddb_entry->ip_options);
	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);

	/* Scalar parameters; firmware stores lengths in 512-byte units. */
	conn->max_recv_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
	conn->max_xmit_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
	sess->first_burst = BYTE_UNITS *
			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
	sess->max_burst = BYTE_UNITS *
				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
	/* NOTE(review): parent_type is set to the raw ddb_link value here,
	 * same as parent_idx — confirm this is intentional. */
	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);

	sess->default_taskmgmt_timeout =
				le16_to_cpu(fw_ddb_entry->def_timeout);
	conn->port = le16_to_cpu(fw_ddb_entry->port);

	/* Address buffers are always IPv6-sized; IPv4 uses a prefix. */
	options = le16_to_cpu(fw_ddb_entry->options);
	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
	if (!conn->ipaddress) {
		rc = -ENOMEM;
		goto exit_copy;
	}

	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
	if (!conn->redirect_ipaddr) {
		rc = -ENOMEM;
		goto exit_copy;
	}

	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);

	if (test_bit(OPT_IPV6_DEVICE, &options)) {
		/* For IPv6 the fw's ipv4_tos field carries the traffic
		 * class (the encode path writes it back the same way). */
		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;

		conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
		if (!conn->link_local_ipv6_addr) {
			rc = -ENOMEM;
			goto exit_copy;
		}

		memcpy(conn->link_local_ipv6_addr,
		       fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
	} else {
		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
	}

	if (fw_ddb_entry->iscsi_name[0]) {
		rc = iscsi_switch_str_param(&sess->targetname,
					    (char *)fw_ddb_entry->iscsi_name);
		if (rc)
			goto exit_copy;
	}

	if (fw_ddb_entry->iscsi_alias[0]) {
		rc = iscsi_switch_str_param(&sess->targetalias,
					    (char *)fw_ddb_entry->iscsi_alias);
		if (rc)
			goto exit_copy;
	}

	COPY_ISID(sess->isid, fw_ddb_entry->isid);

exit_copy:
	return rc;
}
2391
/*
 * qla4xxx_copy_to_fwddb_param - encode a flash session/connection into a
 * firmware DDB entry.
 *
 * Inverse of qla4xxx_copy_from_fwddb_param(): packs the option bitmaps
 * and scalar/string parameters from @sess / @conn into @fw_ddb_entry in
 * the firmware's little-endian layout.  Always returns 0 (rc kept for
 * symmetry with the decode path).
 */
static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
				       struct iscsi_bus_flash_conn *conn,
				       struct dev_db_entry *fw_ddb_entry)
{
	uint16_t options;
	int rc = 0;

	/* General DDB options; BIT_8 selects IPv6. */
	options = le16_to_cpu(fw_ddb_entry->options);
	SET_BITVAL(conn->is_fw_assigned_ipv6,  options, BIT_11);
	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
		options |= BIT_8;
	else
		options &= ~BIT_8;

	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
	SET_BITVAL(sess->discovery_sess, options, BIT_4);
	SET_BITVAL(sess->entry_state, options, BIT_3);
	fw_ddb_entry->options = cpu_to_le16(options);

	/* iSCSI negotiation option bits. */
	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
	SET_BITVAL(conn->datadgst_en, options, BIT_12);
	SET_BITVAL(sess->imm_data_en, options, BIT_11);
	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
	SET_BITVAL(conn->snack_req_en, options, BIT_6);
	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
	fw_ddb_entry->iscsi_options = cpu_to_le16(options);

	/* TCP options; timer scale bits 0..2 map to option bits 1..3. */
	options = le16_to_cpu(fw_ddb_entry->tcp_options);
	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
	fw_ddb_entry->tcp_options = cpu_to_le16(options);

	options = le16_to_cpu(fw_ddb_entry->ip_options);
	SET_BITVAL(conn->fragment_disable, options, BIT_4);
	fw_ddb_entry->ip_options = cpu_to_le16(options);

	/* Scalar parameters; lengths stored in 512-byte firmware units. */
	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
	fw_ddb_entry->iscsi_max_snd_data_seg_len =
			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
	fw_ddb_entry->iscsi_first_burst_len =
				cpu_to_le16(sess->first_burst / BYTE_UNITS);
	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
					    BYTE_UNITS);
	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
	/* NOTE(review): cpu_to_le32 result truncated to u8 — harmless on
	 * little-endian; verify intended behavior on big-endian hosts. */
	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
	fw_ddb_entry->port = cpu_to_le16(conn->port);
	fw_ddb_entry->def_timeout =
				cpu_to_le16(sess->default_taskmgmt_timeout);

	/* The fw's ipv4_tos field doubles as the IPv6 traffic class. */
	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
		fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
	else
		fw_ddb_entry->ipv4_tos = conn->ipv4_tos;

	if (conn->ipaddress)
		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
		       sizeof(fw_ddb_entry->ip_addr));

	if (conn->redirect_ipaddr)
		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
		       sizeof(fw_ddb_entry->tgt_addr));

	if (conn->link_local_ipv6_addr)
		memcpy(fw_ddb_entry->link_local_ipv6_addr,
		       conn->link_local_ipv6_addr,
		       sizeof(fw_ddb_entry->link_local_ipv6_addr));

	if (sess->targetname)
		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
		       sizeof(fw_ddb_entry->iscsi_name));

	if (sess->targetalias)
		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
		       sizeof(fw_ddb_entry->iscsi_alias));

	COPY_ISID(fw_ddb_entry->isid, sess->isid);

	return rc;
}
2498
/*
 * qla4xxx_copy_to_sess_conn_params - populate a live iSCSI session and
 * connection from a firmware DDB entry.
 *
 * Same decoding as qla4xxx_copy_from_fwddb_param() but for runtime
 * (libiscsi) session/conn objects rather than flash objects; also derives
 * the discovery parent type from the DDB link field.
 */
static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
					     struct iscsi_session *sess,
					     struct dev_db_entry *fw_ddb_entry)
{
	unsigned long options = 0;
	uint16_t ddb_link;
	uint16_t disc_parent;

	/* General DDB options. */
	options = le16_to_cpu(fw_ddb_entry->options);
	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
					      &options);
	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);

	/* iSCSI negotiation option bits. */
	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
					    &options);
	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
					     &options);
	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
	sess->discovery_auth_optional =
			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
	/* ERL is a two-bit field split across two option bits. */
	if (test_bit(ISCSIOPT_ERL1, &options))
		sess->erl |= BIT_1;
	if (test_bit(ISCSIOPT_ERL0, &options))
		sess->erl |= BIT_0;

	options = le16_to_cpu(fw_ddb_entry->tcp_options);
	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
		conn->tcp_timer_scale |= BIT_3;
	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
		conn->tcp_timer_scale |= BIT_2;
	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
		conn->tcp_timer_scale |= BIT_1;

	/* Scale bits were gathered in BIT_1..BIT_3; shift to a 3-bit value. */
	conn->tcp_timer_scale >>= 1;
	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);

	options = le16_to_cpu(fw_ddb_entry->ip_options);
	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);

	/* Scalar parameters; lengths stored in 512-byte firmware units. */
	conn->max_recv_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
	conn->max_xmit_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
	sess->first_burst = BYTE_UNITS *
			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
	sess->max_burst = BYTE_UNITS *
				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
	conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
	conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
	COPY_ISID(sess->isid, fw_ddb_entry->isid);

	/* Classify the discovery parent from the DDB link index. */
	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
	if (ddb_link == DDB_ISNS)
		disc_parent = ISCSI_DISC_PARENT_ISNS;
	else if (ddb_link == DDB_NO_LINK)
		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
	else if (ddb_link < MAX_DDB_ENTRIES)
		disc_parent = ISCSI_DISC_PARENT_SENDTGT;
	else
		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;

	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
			iscsi_get_discovery_parent_name(disc_parent), 0);

	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
			(char *)fw_ddb_entry->iscsi_alias, 0);
}
2588
/*
 * qla4xxx_copy_fwddb_param - refresh session/connection attributes from a
 * firmware DDB entry, including portal address, names and CHAP credentials.
 */
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
				     struct dev_db_entry *fw_ddb_entry,
				     struct iscsi_cls_session *cls_sess,
				     struct iscsi_cls_conn *cls_conn)
{
	int buflen = 0;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct ql4_chap_table chap_tbl;
	struct iscsi_conn *conn;
	char ip_addr[DDB_IPADDR_LEN];
	uint16_t options = 0;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	conn = cls_conn->dd_data;
	memset(&chap_tbl, 0, sizeof(chap_tbl));

	/* Remember the CHAP table index for the credential lookup below. */
	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);

	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);

	sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);

	/* Format the portal address per the DDB's address family. */
	memset(ip_addr, 0, sizeof(ip_addr));
	options = le16_to_cpu(fw_ddb_entry->options);
	if (options & DDB_OPT_IPV6_DEVICE) {
		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);

		memset(ip_addr, 0, sizeof(ip_addr));
		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
	} else {
		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
	}

	/* buflen of 0 lets iscsi_set_param derive the string length. */
	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
			(char *)ip_addr, buflen);
	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
			(char *)fw_ddb_entry->iscsi_name, buflen);
	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
			(char *)ha->name_string, buflen);

	/* Pull outbound CHAP credentials, if a valid table entry exists. */
	if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
		if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
						   chap_tbl.secret,
						   ddb_entry->chap_tbl_idx)) {
			iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
					(char *)chap_tbl.name,
					strlen((char *)chap_tbl.name));
			iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
					(char *)chap_tbl.secret,
					chap_tbl.secret_len);
		}
	}
}
2646
2647 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
2648 struct ddb_entry *ddb_entry)
2649 {
2650 struct iscsi_cls_session *cls_sess;
2651 struct iscsi_cls_conn *cls_conn;
2652 uint32_t ddb_state;
2653 dma_addr_t fw_ddb_entry_dma;
2654 struct dev_db_entry *fw_ddb_entry;
2655
2656 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2657 &fw_ddb_entry_dma, GFP_KERNEL);
2658 if (!fw_ddb_entry) {
2659 ql4_printk(KERN_ERR, ha,
2660 "%s: Unable to allocate dma buffer\n", __func__);
2661 goto exit_session_conn_fwddb_param;
2662 }
2663
2664 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2665 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2666 NULL, NULL, NULL) == QLA_ERROR) {
2667 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2668 "get_ddb_entry for fw_ddb_index %d\n",
2669 ha->host_no, __func__,
2670 ddb_entry->fw_ddb_index));
2671 goto exit_session_conn_fwddb_param;
2672 }
2673
2674 cls_sess = ddb_entry->sess;
2675
2676 cls_conn = ddb_entry->conn;
2677
2678 /* Update params */
2679 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2680
2681 exit_session_conn_fwddb_param:
2682 if (fw_ddb_entry)
2683 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2684 fw_ddb_entry, fw_ddb_entry_dma);
2685 }
2686
/*
 * qla4xxx_update_session_conn_param - refresh the live session/connection
 * parameters and relogin timers for @ddb_entry from its firmware DDB.
 */
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
				       struct ddb_entry *ddb_entry)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct iscsi_conn *conn;
	uint32_t ddb_state;
	dma_addr_t fw_ddb_entry_dma;
	struct dev_db_entry *fw_ddb_entry;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto exit_session_conn_param;
	}

	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
				    NULL, NULL, NULL) == QLA_ERROR) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "get_ddb_entry for fw_ddb_index %d\n",
				  ha->host_no, __func__,
				  ddb_entry->fw_ddb_index));
		goto exit_session_conn_param;
	}

	cls_sess = ddb_entry->sess;
	sess = cls_sess->dd_data;

	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;

	/* Update timers after login */
	/* Clamp the relogin timeout: use the fw default only when it is
	 * within (LOGIN_TOV, LOGIN_TOV * 10), otherwise fall back. */
	ddb_entry->default_relogin_timeout =
		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
	ddb_entry->default_time2wait =
				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);

	/* Update params */
	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);

	/* Copy at most the smaller of the two buffers (may truncate). */
	memcpy(sess->initiatorname, ha->name_string,
	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));

exit_session_conn_param:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}
2742
2743 /*
2744 * Timer routines
2745 */
2746
2747 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2748 unsigned long interval)
2749 {
2750 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2751 __func__, ha->host->host_no));
2752 init_timer(&ha->timer);
2753 ha->timer.expires = jiffies + interval * HZ;
2754 ha->timer.data = (unsigned long)ha;
2755 ha->timer.function = (void (*)(unsigned long))func;
2756 add_timer(&ha->timer);
2757 ha->timer_active = 1;
2758 }
2759
/* Stop the adapter timer, waiting for a running handler to finish. */
static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
{
	del_timer_sync(&ha->timer);
	ha->timer_active = 0;
}
2765
/***
 * qla4xxx_mark_device_missing - blocks the session
 * @cls_session: Pointer to the session to be blocked
 *
 * This routine marks a device missing by blocking its iSCSI class
 * session, which stops new I/O from being queued to it.
 **/
void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
{
	iscsi_block_session(cls_session);
}
2777
/**
 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
 * @ha: Pointer to host adapter structure.
 *
 * Blocks every iSCSI session on this host via
 * qla4xxx_mark_device_missing().
 **/
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
{
	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
}
2788
2789 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2790 struct ddb_entry *ddb_entry,
2791 struct scsi_cmnd *cmd)
2792 {
2793 struct srb *srb;
2794
2795 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2796 if (!srb)
2797 return srb;
2798
2799 kref_init(&srb->srb_ref);
2800 srb->ha = ha;
2801 srb->ddb = ddb_entry;
2802 srb->cmd = cmd;
2803 srb->flags = 0;
2804 CMD_SP(cmd) = (void *)srb;
2805
2806 return srb;
2807 }
2808
2809 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2810 {
2811 struct scsi_cmnd *cmd = srb->cmd;
2812
2813 if (srb->flags & SRB_DMA_VALID) {
2814 scsi_dma_unmap(cmd);
2815 srb->flags &= ~SRB_DMA_VALID;
2816 }
2817 CMD_SP(cmd) = NULL;
2818 }
2819
/*
 * qla4xxx_srb_compl - kref release callback for an srb.
 *
 * Runs when the last reference is dropped: unmaps DMA, returns the srb
 * to the mempool and completes the SCSI command back to the midlayer.
 */
void qla4xxx_srb_compl(struct kref *ref)
{
	struct srb *srb = container_of(ref, struct srb, srb_ref);
	struct scsi_cmnd *cmd = srb->cmd;
	struct scsi_qla_host *ha = srb->ha;

	qla4xxx_srb_free_dma(ha, srb);

	mempool_free(srb, ha->srb_mempool);

	cmd->scsi_done(cmd);
}
2832
/**
 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
 * @host: scsi host
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * Remarks:
 *    This routine is invoked by Linux to send a SCSI command to the driver.
 *    The mid-level driver tries to ensure that queuecommand never gets
 *    invoked concurrently with itself or the interrupt handler (although
 *    the interrupt handler may call this routine as part of request-
 *    completion handling).   Unfortunely, it sometimes calls the scheduler
 *    in interrupt context which is a big NO! NO!.
 **/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	struct iscsi_cls_session *sess = ddb_entry->sess;
	struct srb *srb;
	int rval;

	/* Fail fast during PCI error recovery (EEH). */
	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_REQUEUE << 16;
		goto qc_fail_command;
	}

	if (!sess) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc_fail_command;
	}

	/* Let the transport class reject commands for non-ready sessions. */
	rval = iscsi_session_chkready(sess);
	if (rval) {
		cmd->result = rval;
		goto qc_fail_command;
	}

	/* Any pending reset/recovery or link-down state: push back to the
	 * midlayer for retry rather than failing the command. */
	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
	    !test_bit(AF_ONLINE, &ha->flags) ||
	    !test_bit(AF_LINK_UP, &ha->flags) ||
	    test_bit(AF_LOOPBACK, &ha->flags) ||
	    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
	    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
		goto qc_host_busy;

	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
	if (!srb)
		goto qc_host_busy;

	rval = qla4xxx_send_command_to_isp(ha, srb);
	if (rval != QLA_SUCCESS)
		goto qc_host_busy_free_sp;

	return 0;

qc_host_busy_free_sp:
	qla4xxx_srb_free_dma(ha, srb);
	mempool_free(srb, ha->srb_mempool);

qc_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}
2908
2909 /**
2910 * qla4xxx_mem_free - frees memory allocated to adapter
2911 * @ha: Pointer to host adapter structure.
2912 *
2913 * Frees memory previously allocated by qla4xxx_mem_alloc
2914 **/
2915 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2916 {
2917 if (ha->queues)
2918 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2919 ha->queues_dma);
2920
2921 if (ha->fw_dump)
2922 vfree(ha->fw_dump);
2923
2924 ha->queues_len = 0;
2925 ha->queues = NULL;
2926 ha->queues_dma = 0;
2927 ha->request_ring = NULL;
2928 ha->request_dma = 0;
2929 ha->response_ring = NULL;
2930 ha->response_dma = 0;
2931 ha->shadow_regs = NULL;
2932 ha->shadow_regs_dma = 0;
2933 ha->fw_dump = NULL;
2934 ha->fw_dump_size = 0;
2935
2936 /* Free srb pool. */
2937 if (ha->srb_mempool)
2938 mempool_destroy(ha->srb_mempool);
2939
2940 ha->srb_mempool = NULL;
2941
2942 if (ha->chap_dma_pool)
2943 dma_pool_destroy(ha->chap_dma_pool);
2944
2945 if (ha->chap_list)
2946 vfree(ha->chap_list);
2947 ha->chap_list = NULL;
2948
2949 if (ha->fw_ddb_dma_pool)
2950 dma_pool_destroy(ha->fw_ddb_dma_pool);
2951
2952 /* release io space registers */
2953 if (is_qla8022(ha)) {
2954 if (ha->nx_pcibase)
2955 iounmap(
2956 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2957 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2958 if (ha->nx_pcibase)
2959 iounmap(
2960 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2961 } else if (ha->reg) {
2962 iounmap(ha->reg);
2963 }
2964
2965 if (ha->reset_tmplt.buff)
2966 vfree(ha->reset_tmplt.buff);
2967
2968 pci_release_regions(ha->pdev);
2969 }
2970
/**
 * qla4xxx_mem_alloc - allocates memory for use by adapter.
 * @ha: Pointer to host adapter structure
 *
 * Allocates DMA memory for request and response queues. Also allocates memory
 * for srbs.
 *
 * Returns QLA_SUCCESS, or QLA_ERROR after releasing any partial
 * allocations via qla4xxx_mem_free().
 **/
static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
{
	unsigned long align;

	/* Allocate contiguous block of DMA memory for queues. */
	/* Size = request ring + response ring + shadow registers, plus
	 * MEM_ALIGN_VALUE of slack so the rings can be realigned below,
	 * rounded up to a whole number of pages. */
	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
			  sizeof(struct shadow_regs) +
			  MEM_ALIGN_VALUE +
			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
					&ha->queues_dma, GFP_KERNEL);
	if (ha->queues == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - queues.\n");

		goto mem_alloc_error_exit;
	}
	memset(ha->queues, 0, ha->queues_len);

	/*
	 * As per RISC alignment requirements -- the bus-address must be a
	 * multiple of the request-ring size (in bytes).
	 */
	align = 0;
	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
					   (MEM_ALIGN_VALUE - 1));

	/* Update request and response queue pointers. */
	/* Carve the single DMA block into request ring, response ring and
	 * shadow registers; CPU and bus addresses use matching offsets. */
	ha->request_dma = ha->queues_dma + align;
	ha->request_ring = (struct queue_entry *) (ha->queues + align);
	ha->response_dma = ha->queues_dma + align +
		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
	ha->response_ring = (struct queue_entry *) (ha->queues + align +
						    (REQUEST_QUEUE_DEPTH *
						     QUEUE_SIZE));
	ha->shadow_regs_dma = ha->queues_dma + align +
		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
						  (REQUEST_QUEUE_DEPTH *
						   QUEUE_SIZE) +
						  (RESPONSE_QUEUE_DEPTH *
						   QUEUE_SIZE));

	/* Allocate memory for srb pool. */
	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
					 mempool_free_slab, srb_cachep);
	if (ha->srb_mempool == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - SRB Pool.\n");

		goto mem_alloc_error_exit;
	}

	/* DMA pool for CHAP entry buffers, 8-byte aligned. */
	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
					    CHAP_DMA_BLOCK_SIZE, 8, 0);

	if (ha->chap_dma_pool == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: chap_dma_pool allocation failed..\n", __func__);
		goto mem_alloc_error_exit;
	}

	/* DMA pool for firmware DDB entry buffers, 8-byte aligned. */
	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
					      DDB_DMA_BLOCK_SIZE, 8, 0);

	if (ha->fw_ddb_dma_pool == NULL) {
		ql4_printk(KERN_WARNING, ha,
			   "%s: fw_ddb_dma_pool allocation failed..\n",
			   __func__);
		goto mem_alloc_error_exit;
	}

	return QLA_SUCCESS;

mem_alloc_error_exit:
	/* mem_free tolerates a partially-completed allocation. */
	qla4xxx_mem_free(ha);
	return QLA_ERROR;
}
3059
3060 /**
3061 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
3062 * @ha: adapter block pointer.
3063 *
3064 * Note: The caller should not hold the idc lock.
3065 **/
3066 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
3067 {
3068 uint32_t temp, temp_state, temp_val;
3069 int status = QLA_SUCCESS;
3070
3071 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
3072
3073 temp_state = qla82xx_get_temp_state(temp);
3074 temp_val = qla82xx_get_temp_val(temp);
3075
3076 if (temp_state == QLA82XX_TEMP_PANIC) {
3077 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
3078 " exceeds maximum allowed. Hardware has been shut"
3079 " down.\n", temp_val);
3080 status = QLA_ERROR;
3081 } else if (temp_state == QLA82XX_TEMP_WARN) {
3082 if (ha->temperature == QLA82XX_TEMP_NORMAL)
3083 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
3084 " degrees C exceeds operating range."
3085 " Immediate action needed.\n", temp_val);
3086 } else {
3087 if (ha->temperature == QLA82XX_TEMP_WARN)
3088 ql4_printk(KERN_INFO, ha, "Device temperature is"
3089 " now %d degrees C in normal range.\n",
3090 temp_val);
3091 }
3092 ha->temperature = temp_state;
3093 return status;
3094 }
3095
3096 /**
3097 * qla4_8xxx_check_fw_alive - Check firmware health
3098 * @ha: Pointer to host adapter structure.
3099 *
3100 * Context: Interrupt
3101 **/
3102 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
3103 {
3104 uint32_t fw_heartbeat_counter;
3105 int status = QLA_SUCCESS;
3106
3107 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
3108 QLA8XXX_PEG_ALIVE_COUNTER);
3109 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
3110 if (fw_heartbeat_counter == 0xffffffff) {
3111 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
3112 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
3113 ha->host_no, __func__));
3114 return status;
3115 }
3116
3117 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
3118 ha->seconds_since_last_heartbeat++;
3119 /* FW not alive after 2 seconds */
3120 if (ha->seconds_since_last_heartbeat == 2) {
3121 ha->seconds_since_last_heartbeat = 0;
3122 qla4_8xxx_dump_peg_reg(ha);
3123 status = QLA_ERROR;
3124 }
3125 } else
3126 ha->seconds_since_last_heartbeat = 0;
3127
3128 ha->fw_heartbeat_counter = fw_heartbeat_counter;
3129 return status;
3130 }
3131
/* Handle a detected firmware failure on an ISP8xxx: decode the PEG halt
 * status register, then schedule either an unrecoverable-HA or a
 * reset-HA DPC.  Runs in interrupt context, hence the deferral. */
static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
{
	uint32_t halt_status;
	int halt_status_unrecoverable = 0;

	halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);

	if (is_qla8022(ha)) {
		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
			   __func__);
		/* Stop XG pause frames on both ports while firmware is down. */
		qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
				CRB_NIU_XG_PAUSE_CTL_P0 |
				CRB_NIU_XG_PAUSE_CTL_P1);

		if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
			ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
				   __func__);
		if (halt_status & HALT_STATUS_UNRECOVERABLE)
			halt_status_unrecoverable = 1;
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
			ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
				   __func__);
		else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
			halt_status_unrecoverable = 1;
	}

	/*
	 * Since we cannot change dev_state in interrupt context,
	 * set appropriate DPC flag then wakeup DPC
	 */
	if (halt_status_unrecoverable) {
		set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
	} else {
		ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}
	/* Fail any mailbox command currently waiting on dead firmware. */
	qla4xxx_mailbox_premature_completion(ha);
	qla4xxx_wake_dpc(ha);
}
3173
/**
 * qla4_8xxx_watchdog - Poll dev state
 * @ha: Pointer to host adapter structure.
 *
 * Called from the one-second timer for ISP8xxx adapters.  Checks the
 * temperature, the IDC device state and the firmware heartbeat, and
 * schedules the appropriate DPC action for each condition.
 *
 * Context: Interrupt
 **/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
	uint32_t dev_state;
	uint32_t idc_ctrl;

	/* don't poll if reset is going on */
	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);

		if (qla4_8xxx_check_temp(ha)) {
			/* Thermal shutdown: mark the HA unrecoverable. */
			if (is_qla8022(ha)) {
				ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
						CRB_NIU_XG_PAUSE_CTL_P0 |
						CRB_NIU_XG_PAUSE_CTL_P1);
			}
			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
			qla4xxx_wake_dpc(ha);
		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {

			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
				   __func__);

			if (is_qla8032(ha) || is_qla8042(ha)) {
				/* For a non-graceful 83xx/84xx reset, fail
				 * outstanding mailbox commands up front. */
				idc_ctrl = qla4_83xx_rd_reg(ha,
							QLA83XX_IDC_DRV_CTRL);
				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
						   __func__);
					qla4xxx_mailbox_premature_completion(
									ha);
				}
			}

			/* ql4xdontresethba is only honored on ISP8022. */
			if ((is_qla8032(ha) || is_qla8042(ha)) ||
			    (is_qla8022(ha) && !ql4xdontresethba)) {
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
				qla4xxx_wake_dpc(ha);
			}
		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
				   __func__);
			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
			qla4xxx_wake_dpc(ha);
		} else {
			/* Check firmware health */
			if (qla4_8xxx_check_fw_alive(ha))
				qla4_8xxx_process_fw_error(ha);
		}
	}
}
3235
/* Per-session one-second tick for flash (driver-managed) DDBs: counts
 * down the relogin retry timer and, when it expires, flags the session
 * for relogin and wakes the DPC relogin path. */
static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	/* Only flash DDBs are relogged in by the driver. */
	if (!(ddb_entry->ddb_type == FLASH_DDB))
		return;

	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
	    !iscsi_is_session_online(cls_sess)) {
		/* INVALID_ENTRY means no retry countdown is armed. */
		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
		    INVALID_ENTRY) {
			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
					0) {
				/* Countdown expired: disarm and hand the
				 * relogin off to the DPC thread. */
				atomic_set(&ddb_entry->retry_relogin_timer,
					   INVALID_ENTRY);
				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
				set_bit(DF_RELOGIN, &ddb_entry->flags);
				DEBUG2(ql4_printk(KERN_INFO, ha,
				       "%s: index [%d] login device\n",
					__func__, ddb_entry->fw_ddb_index));
			} else
				atomic_dec(&ddb_entry->retry_relogin_timer);
		}
	}

	/* Wait for relogin to timeout */
	if (atomic_read(&ddb_entry->relogin_timer) &&
	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
		/*
		 * If the relogin times out and the device is
		 * still NOT ONLINE then try and relogin again.
		 */
		if (!iscsi_is_session_online(cls_sess)) {
			/* Reset retry relogin timer */
			atomic_inc(&ddb_entry->relogin_retry_count);
			DEBUG2(ql4_printk(KERN_INFO, ha,
				"%s: index[%d] relogin timed out-retrying"
				" relogin (%d), retry (%d)\n", __func__,
				ddb_entry->fw_ddb_index,
				atomic_read(&ddb_entry->relogin_retry_count),
				ddb_entry->default_time2wait + 4));
			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
			atomic_set(&ddb_entry->retry_relogin_timer,
				   ddb_entry->default_time2wait + 4);
		}
	}
}
3289
/**
 * qla4xxx_timer - checks every second for work to do.
 * @ha: Pointer to host adapter structure.
 *
 * One-second housekeeping: drives per-session relogin countdowns, runs
 * the ISP8xxx watchdog, checks the ISP4xxx heartbeat, wakes the DPC
 * thread if any deferred work or DPC flag is pending, and re-arms
 * itself for one second later.
 **/
static void qla4xxx_timer(struct scsi_qla_host *ha)
{
	int start_dpc = 0;
	uint16_t w;

	/* Tick the relogin machinery for every flash-DDB session. */
	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);

	/* If we are in the middle of AER/EEH processing
	 * skip any processing and reschedule the timer
	 */
	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		mod_timer(&ha->timer, jiffies + HZ);
		return;
	}

	/* Hardware read to trigger an EEH error during mailbox waits. */
	if (!pci_channel_offline(ha->pdev))
		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);

	if (is_qla80XX(ha))
		qla4_8xxx_watchdog(ha);

	if (is_qla40XX(ha)) {
		/* Check for heartbeat interval. */
		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
		    ha->heartbeat_interval != 0) {
			ha->seconds_since_last_heartbeat++;
			/* Allow 2 seconds of slack past the configured
			 * interval before scheduling a reset. */
			if (ha->seconds_since_last_heartbeat >
			    ha->heartbeat_interval + 2)
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
		}
	}

	/* Process any deferred work. */
	if (!list_empty(&ha->work_list))
		start_dpc++;

	/* Wakeup the dpc routine for this adapter, if needed. */
	if (start_dpc ||
	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
	     test_bit(DPC_AEN, &ha->dpc_flags)) {
		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
			      " - dpc flags = 0x%lx\n",
			      ha->host_no, __func__, ha->dpc_flags));
		qla4xxx_wake_dpc(ha);
	}

	/* Reschedule timer thread to call us back in one second */
	mod_timer(&ha->timer, jiffies + HZ);

	DEBUG2(ha->seconds_since_last_intr++);
}
3354
3355 /**
3356 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
3357 * @ha: Pointer to host adapter structure.
3358 *
3359 * This routine stalls the driver until all outstanding commands are returned.
3360 * Caller must release the Hardware Lock prior to calling this routine.
3361 **/
3362 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
3363 {
3364 uint32_t index = 0;
3365 unsigned long flags;
3366 struct scsi_cmnd *cmd;
3367
3368 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
3369
3370 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
3371 "complete\n", WAIT_CMD_TOV));
3372
3373 while (!time_after_eq(jiffies, wtime)) {
3374 spin_lock_irqsave(&ha->hardware_lock, flags);
3375 /* Find a command that hasn't completed. */
3376 for (index = 0; index < ha->host->can_queue; index++) {
3377 cmd = scsi_host_find_tag(ha->host, index);
3378 /*
3379 * We cannot just check if the index is valid,
3380 * becase if we are run from the scsi eh, then
3381 * the scsi/block layer is going to prevent
3382 * the tag from being released.
3383 */
3384 if (cmd != NULL && CMD_SP(cmd))
3385 break;
3386 }
3387 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3388
3389 /* If No Commands are pending, wait is complete */
3390 if (index == ha->host->can_queue)
3391 return QLA_SUCCESS;
3392
3393 msleep(1000);
3394 }
3395 /* If we timed out on waiting for commands to come back
3396 * return ERROR. */
3397 return QLA_ERROR;
3398 }
3399
/* Issue a soft reset to the ISP4xxx through the control/status register.
 * Acquires the inter-driver semaphore first; a pending SCSI reset
 * interrupt is cleared beforehand because it would block the reset. */
int qla4xxx_hw_reset(struct scsi_qla_host *ha)
{
	uint32_t ctrl_status;
	unsigned long flags = 0;

	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));

	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
		return QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/*
	 * If the SCSI Reset Interrupt bit is set, clear it.
	 * Otherwise, the Soft Reset won't work.
	 */
	ctrl_status = readw(&ha->reg->ctrl_status);
	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);

	/* Issue Soft Reset */
	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);	/* flush the posted write */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
}
3427
/**
 * qla4xxx_soft_reset - performs soft reset.
 * @ha: Pointer to host adapter structure.
 *
 * Triggers the hardware soft reset, waits for the network function to
 * acknowledge, then polls until firmware clears the reset bit.  Falls
 * back to a force soft reset if the normal reset is never acknowledged
 * (e.g. the BIOS on the other function holds it up).  Returns
 * QLA_SUCCESS or QLA_ERROR.
 **/
int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
	uint32_t max_wait_time;
	unsigned long flags = 0;
	int status;
	uint32_t ctrl_status;

	status = qla4xxx_hw_reset(ha);
	if (status != QLA_SUCCESS)
		return status;

	status = QLA_ERROR;
	/* Wait until the Network Reset Intr bit is cleared */
	max_wait_time = RESET_INTR_TOV;
	do {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		ctrl_status = readw(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
			break;

		msleep(1000);
	} while ((--max_wait_time));

	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
		DEBUG2(printk(KERN_WARNING
			      "scsi%ld: Network Reset Intr not cleared by "
			      "Network function, clearing it now!\n",
			      ha->host_no));
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = SOFT_RESET_TOV;
	do {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		ctrl_status = readw(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
			status = QLA_SUCCESS;
			break;
		}

		msleep(1000);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
	 * after the soft reset has taken place.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ctrl_status = readw(&ha->reg->ctrl_status);
	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* If soft reset fails then most probably the bios on other
	 * function is also enabled.
	 * Since the initialization is sequential the other fn
	 * wont be able to acknowledge the soft reset.
	 * Issue a force soft reset to workaround this scenario.
	 */
	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		/* Wait until the firmware tells us the Soft Reset is done */
		max_wait_time = SOFT_RESET_TOV;
		do {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ctrl_status = readw(&ha->reg->ctrl_status);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
				status = QLA_SUCCESS;
				break;
			}

			msleep(1000);
		} while ((--max_wait_time));
	}

	return status;
}
3525
3526 /**
3527 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
3528 * @ha: Pointer to host adapter structure.
3529 * @res: returned scsi status
3530 *
3531 * This routine is called just prior to a HARD RESET to return all
3532 * outstanding commands back to the Operating System.
3533 * Caller should make sure that the following locks are released
3534 * before this calling routine: Hardware lock, and io_request_lock.
3535 **/
3536 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
3537 {
3538 struct srb *srb;
3539 int i;
3540 unsigned long flags;
3541
3542 spin_lock_irqsave(&ha->hardware_lock, flags);
3543 for (i = 0; i < ha->host->can_queue; i++) {
3544 srb = qla4xxx_del_from_active_array(ha, i);
3545 if (srb != NULL) {
3546 srb->cmd->result = res;
3547 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3548 }
3549 }
3550 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3551 }
3552
/* Permanently take the adapter offline (recovery exhausted or hardware
 * in a failed state): flush all active commands back to the midlayer
 * and mark every device missing. */
void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
{
	clear_bit(AF_ONLINE, &ha->flags);

	/* Disable the board */
	ql4_printk(KERN_INFO, ha, "Disabling the board\n");

	/* Return every in-flight command with DID_NO_CONNECT. */
	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
	qla4xxx_mark_all_devices_missing(ha);
	clear_bit(AF_INIT_DONE, &ha->flags);
}
3564
3565 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
3566 {
3567 struct iscsi_session *sess;
3568 struct ddb_entry *ddb_entry;
3569
3570 sess = cls_session->dd_data;
3571 ddb_entry = sess->dd_data;
3572 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
3573
3574 if (ddb_entry->ddb_type == FLASH_DDB)
3575 iscsi_block_session(ddb_entry->sess);
3576 else
3577 iscsi_session_failure(cls_session->dd_data,
3578 ISCSI_ERR_CONN_FAILED);
3579 }
3580
/**
 * qla4xxx_recover_adapter - recovers adapter after a fatal error
 * @ha: Pointer to host adapter structure.
 *
 * Blocks new I/O, fails all sessions, then performs the least
 * disruptive reset that applies: none (RESET_HA_INTR), a firmware
 * stop (RESET_HA_FW_CONTEXT on ISP8xxx), or a full chip reset.  On
 * success the adapter is re-initialized; on failure retries are
 * scheduled through the DPC until MAX_RESET_HA_RETRIES is exhausted,
 * after which the board is disabled.
 **/
static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
	int status = QLA_ERROR;
	uint8_t reset_chip = 0;
	uint32_t dev_state;
	unsigned long wait;

	/* Stall incoming I/O until we are done */
	scsi_block_requests(ha->host);
	clear_bit(AF_ONLINE, &ha->flags);
	clear_bit(AF_LINK_UP, &ha->flags);

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	if ((is_qla8032(ha) || is_qla8042(ha)) &&
	    !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
			   __func__);
		/* disable pause frame for ISP83xx */
		qla4_83xx_disable_pause(ha);
	}

	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		reset_chip = 1;

	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
	 * do not reset adapter, jump to initialize_adapter */
	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
		status = QLA_SUCCESS;
		goto recover_ha_init_adapter;
	}

	/* For the ISP-8xxx adapter, issue a stop_firmware if invoked
	 * from eh_host_reset or ioctl module */
	if (is_qla80XX(ha) && !reset_chip &&
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {

		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "scsi%ld: %s - Performing stop_firmware...\n",
		    ha->host_no, __func__));
		status = ha->isp_ops->reset_firmware(ha);
		if (status == QLA_SUCCESS) {
			/* Firmware stopped cleanly: drain, quiesce and
			 * fail back any remaining commands. */
			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
				qla4xxx_cmd_wait(ha);

			ha->isp_ops->disable_intrs(ha);
			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		} else {
			/* If the stop_firmware fails then
			 * reset the entire chip */
			reset_chip = 1;
			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
		}
	}

	/* Issue full chip reset if recovering from a catastrophic error,
	 * or if stop_firmware fails for ISP-8xxx.
	 * This is the default case for ISP-4xxx */
	if (is_qla40XX(ha) || reset_chip) {
		if (is_qla40XX(ha))
			goto chip_reset;

		/* Check if 8XXX firmware is alive or not
		 * We may have arrived here from NEED_RESET
		 * detection only */
		if (test_bit(AF_FW_RECOVERY, &ha->flags))
			goto chip_reset;

		/* Give the firmware up to FW_ALIVE_WAIT_TOV seconds to
		 * show a heartbeat before declaring it dead and failing
		 * outstanding mailbox commands. */
		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
		while (time_before(jiffies, wait)) {
			if (qla4_8xxx_check_fw_alive(ha)) {
				qla4xxx_mailbox_premature_completion(ha);
				break;
			}

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}
chip_reset:
		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
			qla4xxx_cmd_wait(ha);

		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "scsi%ld: %s - Performing chip reset..\n",
		    ha->host_no, __func__));
		status = ha->isp_ops->reset_chip(ha);
	}

	/* Flush any pending ddb changed AENs */
	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);

recover_ha_init_adapter:
	/* Upon successful firmware/chip reset, re-initialize the adapter */
	if (status == QLA_SUCCESS) {
		/* For ISP-4xxx, force function 1 to always initialize
		 * before function 3 to prevent both functions from
		 * stepping on top of the other */
		if (is_qla40XX(ha) && (ha->mac_index == 3))
			ssleep(6);

		/* NOTE: AF_ONLINE flag set upon successful completion of
		 * qla4xxx_initialize_adapter */
		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
	}

	/* Retry failed adapter initialization, if necessary
	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
	 * case to prevent ping-pong resets between functions */
	if (!test_bit(AF_ONLINE, &ha->flags) &&
	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
		/* Adapter initialization failed, see if we can retry
		 * resetting the ha.
		 * Since we don't want to block the DPC for too long
		 * with multiple resets in the same thread,
		 * utilize DPC to retry */
		if (is_qla80XX(ha)) {
			ha->isp_ops->idc_lock(ha);
			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
			ha->isp_ops->idc_unlock(ha);
			if (dev_state == QLA8XXX_DEV_FAILED) {
				ql4_printk(KERN_INFO, ha, "%s: don't retry "
					   "recover adapter. H/W is in Failed "
					   "state\n", __func__);
				qla4xxx_dead_adapter_cleanup(ha);
				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA_FW_CONTEXT,
					  &ha->dpc_flags);
				status = QLA_ERROR;

				goto exit_recover;
			}
		}

		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
			/* First failure: arm the retry counter and let the
			 * DPC schedule another attempt. */
			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
			DEBUG2(printk("scsi%ld: recover adapter - retrying "
				      "(%d) more times\n", ha->host_no,
				      ha->retry_reset_ha_cnt));
			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
			status = QLA_ERROR;
		} else {
			if (ha->retry_reset_ha_cnt > 0) {
				/* Schedule another Reset HA--DPC will retry */
				ha->retry_reset_ha_cnt--;
				DEBUG2(printk("scsi%ld: recover adapter - "
					      "retry remaining %d\n",
					      ha->host_no,
					      ha->retry_reset_ha_cnt));
				status = QLA_ERROR;
			}

			if (ha->retry_reset_ha_cnt == 0) {
				/* Recover adapter retries have been exhausted.
				 * Adapter DEAD */
				DEBUG2(printk("scsi%ld: recover adapter "
					      "failed - board disabled\n",
					      ha->host_no));
				qla4xxx_dead_adapter_cleanup(ha);
				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA_FW_CONTEXT,
					  &ha->dpc_flags);
				status = QLA_ERROR;
			}
		}
	} else {
		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
	}

exit_recover:
	ha->adapter_error_count++;

	if (test_bit(AF_ONLINE, &ha->flags))
		ha->isp_ops->enable_intrs(ha);

	scsi_unblock_requests(ha->host);

	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));

	return status;
}
3780
3781 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3782 {
3783 struct iscsi_session *sess;
3784 struct ddb_entry *ddb_entry;
3785 struct scsi_qla_host *ha;
3786
3787 sess = cls_session->dd_data;
3788 ddb_entry = sess->dd_data;
3789 ha = ddb_entry->ha;
3790 if (!iscsi_is_session_online(cls_session)) {
3791 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3792 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3793 " unblock session\n", ha->host_no, __func__,
3794 ddb_entry->fw_ddb_index);
3795 iscsi_unblock_session(ddb_entry->sess);
3796 } else {
3797 /* Trigger relogin */
3798 if (ddb_entry->ddb_type == FLASH_DDB) {
3799 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
3800 test_bit(DF_DISABLE_RELOGIN,
3801 &ddb_entry->flags)))
3802 qla4xxx_arm_relogin_timer(ddb_entry);
3803 } else
3804 iscsi_session_failure(cls_session->dd_data,
3805 ISCSI_ERR_CONN_FAILED);
3806 }
3807 }
3808 }
3809
/* Unblock a driver-managed (flash DDB) session after a successful login
 * and, if the adapter is online, schedule a SCSI target scan for it.
 * Always returns QLA_SUCCESS. */
int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
		   " unblock session\n", ha->host_no, __func__,
		   ddb_entry->fw_ddb_index);

	iscsi_unblock_session(ddb_entry->sess);

	/* Start scan target */
	if (test_bit(AF_ONLINE, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
			   " start scan\n", ha->host_no, __func__,
			   ddb_entry->fw_ddb_index);
		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
	}
	return QLA_SUCCESS;
}
3834
3835 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3836 {
3837 struct iscsi_session *sess;
3838 struct ddb_entry *ddb_entry;
3839 struct scsi_qla_host *ha;
3840 int status = QLA_SUCCESS;
3841
3842 sess = cls_session->dd_data;
3843 ddb_entry = sess->dd_data;
3844 ha = ddb_entry->ha;
3845 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3846 " unblock user space session\n", ha->host_no, __func__,
3847 ddb_entry->fw_ddb_index);
3848
3849 if (!iscsi_is_session_online(cls_session)) {
3850 iscsi_conn_start(ddb_entry->conn);
3851 iscsi_conn_login_event(ddb_entry->conn,
3852 ISCSI_CONN_STATE_LOGGED_IN);
3853 } else {
3854 ql4_printk(KERN_INFO, ha,
3855 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3856 ha->host_no, __func__, ddb_entry->fw_ddb_index,
3857 cls_session->sid);
3858 status = QLA_ERROR;
3859 }
3860
3861 return status;
3862 }
3863
/* Run the relogin/unblock callback on every session of this host. */
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
3868
3869 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3870 {
3871 uint16_t relogin_timer;
3872 struct iscsi_session *sess;
3873 struct ddb_entry *ddb_entry;
3874 struct scsi_qla_host *ha;
3875
3876 sess = cls_sess->dd_data;
3877 ddb_entry = sess->dd_data;
3878 ha = ddb_entry->ha;
3879
3880 relogin_timer = max(ddb_entry->default_relogin_timeout,
3881 (uint16_t)RELOGIN_TOV);
3882 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3883
3884 DEBUG2(ql4_printk(KERN_INFO, ha,
3885 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3886 ddb_entry->fw_ddb_index, relogin_timer));
3887
3888 qla4xxx_login_flash_ddb(cls_sess);
3889 }
3890
3891 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3892 {
3893 struct iscsi_session *sess;
3894 struct ddb_entry *ddb_entry;
3895 struct scsi_qla_host *ha;
3896
3897 sess = cls_sess->dd_data;
3898 ddb_entry = sess->dd_data;
3899 ha = ddb_entry->ha;
3900
3901 if (!(ddb_entry->ddb_type == FLASH_DDB))
3902 return;
3903
3904 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
3905 return;
3906
3907 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3908 !iscsi_is_session_online(cls_sess)) {
3909 DEBUG2(ql4_printk(KERN_INFO, ha,
3910 "relogin issued\n"));
3911 qla4xxx_relogin_flash_ddb(cls_sess);
3912 }
3913 }
3914
/* Schedule the adapter's deferred-procedure-call work, if the DPC
 * workqueue has been created. */
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
	if (ha->dpc_thread)
		queue_work(ha->dpc_thread, &ha->dpc_work);
}
3920
3921 static struct qla4_work_evt *
3922 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3923 enum qla4_work_type type)
3924 {
3925 struct qla4_work_evt *e;
3926 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3927
3928 e = kzalloc(size, GFP_ATOMIC);
3929 if (!e)
3930 return NULL;
3931
3932 INIT_LIST_HEAD(&e->list);
3933 e->type = type;
3934 return e;
3935 }
3936
/**
 * qla4xxx_post_work - queue a work event and wake the DPC
 * @ha: pointer to adapter structure
 * @e: work event to post (ownership passes to the work list; freed by
 *     qla4xxx_do_work())
 **/
static void qla4xxx_post_work(struct scsi_qla_host *ha,
			     struct qla4_work_evt *e)
{
	unsigned long flags;

	/* irqsave form — presumably the list may be posted to from IRQ
	 * context (see GFP_ATOMIC in qla4xxx_alloc_work()). */
	spin_lock_irqsave(&ha->work_lock, flags);
	list_add_tail(&e->list, &ha->work_list);
	spin_unlock_irqrestore(&ha->work_lock, flags);
	qla4xxx_wake_dpc(ha);
}
3947
3948 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3949 enum iscsi_host_event_code aen_code,
3950 uint32_t data_size, uint8_t *data)
3951 {
3952 struct qla4_work_evt *e;
3953
3954 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3955 if (!e)
3956 return QLA_ERROR;
3957
3958 e->u.aen.code = aen_code;
3959 e->u.aen.data_size = data_size;
3960 memcpy(e->u.aen.data, data, data_size);
3961
3962 qla4xxx_post_work(ha, e);
3963
3964 return QLA_SUCCESS;
3965 }
3966
3967 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3968 uint32_t status, uint32_t pid,
3969 uint32_t data_size, uint8_t *data)
3970 {
3971 struct qla4_work_evt *e;
3972
3973 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3974 if (!e)
3975 return QLA_ERROR;
3976
3977 e->u.ping.status = status;
3978 e->u.ping.pid = pid;
3979 e->u.ping.data_size = data_size;
3980 memcpy(e->u.ping.data, data, data_size);
3981
3982 qla4xxx_post_work(ha, e);
3983
3984 return QLA_SUCCESS;
3985 }
3986
3987 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3988 {
3989 struct qla4_work_evt *e, *tmp;
3990 unsigned long flags;
3991 LIST_HEAD(work);
3992
3993 spin_lock_irqsave(&ha->work_lock, flags);
3994 list_splice_init(&ha->work_list, &work);
3995 spin_unlock_irqrestore(&ha->work_lock, flags);
3996
3997 list_for_each_entry_safe(e, tmp, &work, list) {
3998 list_del_init(&e->list);
3999
4000 switch (e->type) {
4001 case QLA4_EVENT_AEN:
4002 iscsi_post_host_event(ha->host_no,
4003 &qla4xxx_iscsi_transport,
4004 e->u.aen.code,
4005 e->u.aen.data_size,
4006 e->u.aen.data);
4007 break;
4008 case QLA4_EVENT_PING_STATUS:
4009 iscsi_ping_comp_event(ha->host_no,
4010 &qla4xxx_iscsi_transport,
4011 e->u.ping.status,
4012 e->u.ping.pid,
4013 e->u.ping.data_size,
4014 e->u.ping.data);
4015 break;
4016 default:
4017 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
4018 "supported", e->type);
4019 }
4020 kfree(e);
4021 }
4022 }
4023
/**
 * qla4xxx_do_dpc - dpc routine
 * @work: work element; embedded in the adapter structure (dpc_work)
 *
 * This routine is a task that is scheduled by the interrupt handler
 * to perform the background processing for interrupts.  We put it
 * on a task queue that is consumed whenever the scheduler runs; that's
 * so you can do anything (i.e. put the process to sleep etc).  In fact,
 * the mid-level tries to sleep when it reaches the driver threshold
 * "host->can_queue". This can cause a panic if we were in our interrupt code.
 **/
static void qla4xxx_do_dpc(struct work_struct *work)
{
	struct scsi_qla_host *ha =
		container_of(work, struct scsi_qla_host, dpc_work);
	int status = QLA_ERROR;

	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
		      "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
		      ha->host_no, __func__, ha->flags, ha->dpc_flags))

	/* Initialization not yet finished. Don't do anything yet. */
	if (!test_bit(AF_INIT_DONE, &ha->flags))
		return;

	/* PCI error recovery in progress — stay off the hardware. */
	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
			      ha->host_no, __func__, ha->flags));
		return;
	}

	/* post events to application */
	qla4xxx_do_work(ha);

	/* ISP8xxx-only: IDC (inter-driver coordination) state handling. */
	if (is_qla80XX(ha)) {
		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
			if (is_qla8032(ha) || is_qla8042(ha)) {
				ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
					   __func__);
				/* disable pause frame for ISP83xx */
				qla4_83xx_disable_pause(ha);
			}

			/* Mark the device FAILED under the IDC lock so peer
			 * functions observe a consistent state. */
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
			qla4_8xxx_device_state_handler(ha);
		}

		if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
			if (is_qla8042(ha)) {
				/* 8042 with internal loopback enabled:
				 * disable the ACB before acking the IDC. */
				if (ha->idc_info.info2 &
				    ENABLE_INTERNAL_LOOPBACK) {
					ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
						   __func__);
					status = qla4_84xx_config_acb(ha,
							    ACB_CONFIG_DISABLE);
					if (status != QLA_SUCCESS) {
						ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
							   __func__);
					}
				}
			}
			qla4_83xx_post_idc_ack(ha);
			clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
		}

		if (is_qla8042(ha) &&
		    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
			ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
				   __func__);
			if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
			    QLA_SUCCESS) {
				ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
					   __func__);
			}
			clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
		}

		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
			qla4_8xxx_need_qsnt_handler(ha);
		}
	}

	/* Adapter reset requests, honored only when no reset is active. */
	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
		/* Module parameter / IDC policy may forbid resetting. */
		if ((is_qla8022(ha) && ql4xdontresethba) ||
		    ((is_qla8032(ha) || is_qla8042(ha)) &&
		     qla4_83xx_idc_dontreset(ha))) {
			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
				      ha->host_no, __func__));
			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
			goto dpc_post_reset_ha;
		}
		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
			qla4xxx_recover_adapter(ha);

		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
			uint8_t wait_time = RESET_INTR_TOV;

			/* Poll up to RESET_INTR_TOV seconds for the soft
			 * reset bits to clear. */
			while ((readw(&ha->reg->ctrl_status) &
				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
				if (--wait_time == 0)
					break;
				msleep(1000);
			}
			if (wait_time == 0)
				DEBUG2(printk("scsi%ld: %s: SR|FSR "
					      "bit not cleared-- resetting\n",
					      ha->host_no, __func__));
			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
				status = qla4xxx_recover_adapter(ha);
			}
			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
			if (status == QLA_SUCCESS)
				ha->isp_ops->enable_intrs(ha);
		}
	}

dpc_post_reset_ha:
	/* ---- process AEN? --- */
	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);

	/* ---- Get DHCP IP Address? --- */
	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
		qla4xxx_get_dhcp_ip_address(ha);

	/* ---- relogin device? --- */
	if (adapter_up(ha) &&
	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
	}

	/* ---- link change? --- */
	if (!test_bit(AF_LOOPBACK, &ha->flags) &&
	    test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
		if (!test_bit(AF_LINK_UP, &ha->flags)) {
			/* ---- link down? --- */
			qla4xxx_mark_all_devices_missing(ha);
		} else {
			/* ---- link up? --- *
			 * F/W will auto login to all devices ONLY ONCE after
			 * link up during driver initialization and runtime
			 * fatal error recovery. Therefore, the driver must
			 * manually relogin to devices when recovering from
			 * connection failures, logouts, expired KATO, etc. */
			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
				qla4xxx_build_ddb_list(ha, ha->is_reset);
				iscsi_host_for_each_session(ha->host,
						qla4xxx_login_flash_ddb);
			} else
				qla4xxx_relogin_all_devices(ha);
		}
	}
}
4189
/**
 * qla4xxx_free_adapter - release the adapter
 * @ha: pointer to adapter structure
 *
 * Teardown order matters here: abort I/O, silence the hardware, stop the
 * driver threads, reset firmware, then release interrupts and memory.
 **/
static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);

	/* Turn-off interrupts on the card. */
	ha->isp_ops->disable_intrs(ha);

	/* Clear any pending chip interrupt; the register used is
	 * chip-family specific.  The readl() read-back presumably flushes
	 * the posted write — standard PCI practice. */
	if (is_qla40XX(ha)) {
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	} else if (is_qla8022(ha)) {
		writel(0, &ha->qla4_82xx_reg->host_int);
		readl(&ha->qla4_82xx_reg->host_int);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		writel(0, &ha->qla4_83xx_reg->risc_intr);
		readl(&ha->qla4_83xx_reg->risc_intr);
	}

	/* Remove timer thread, if present */
	if (ha->timer_active)
		qla4xxx_stop_timer(ha);

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread)
		destroy_workqueue(ha->dpc_thread);

	/* Kill the iSCSI task workqueue for this host */
	if (ha->task_wq)
		destroy_workqueue(ha->task_wq);

	/* Put firmware in known state */
	ha->isp_ops->reset_firmware(ha);

	/* 8xxx: drop this function from the IDC drv-active mask. */
	if (is_qla80XX(ha)) {
		ha->isp_ops->idc_lock(ha);
		qla4_8xxx_clear_drv_active(ha);
		ha->isp_ops->idc_unlock(ha);
	}

	/* Detach interrupts */
	qla4xxx_free_irqs(ha);

	/* free extra memory */
	qla4xxx_mem_free(ha);
}
4240
/**
 * qla4_8xxx_iospace_config - map ISP8xxx register space
 * @ha: pointer to adapter structure
 *
 * Reserves the PCI regions and ioremaps BAR 0 for the 82xx/83xx/84xx
 * register windows.  Returns 0 on success, -ENOMEM on any failure.
 **/
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
{
	int status = 0;
	unsigned long mem_base, mem_len, db_base, db_len;
	struct pci_dev *pdev = ha->pdev;

	status = pci_request_regions(pdev, DRIVER_NAME);
	if (status) {
		printk(KERN_WARNING
		       "scsi(%ld) Failed to reserve PIO regions (%s) "
		       "status=%d\n", ha->host_no, pci_name(pdev), status);
		goto iospace_error_exit;
	}

	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
		      __func__, pdev->revision));
	ha->revision_id = pdev->revision;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);
	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
		      __func__, mem_base, mem_len));

	/* mapping of pcibase pointer */
	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
	if (!ha->nx_pcibase) {
		printk(KERN_ERR
		       "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
		pci_release_regions(ha->pdev);
		goto iospace_error_exit;
	}

	/* Mapping of IO base pointer, door bell read and write pointer */

	/* mapping of IO base pointer */
	if (is_qla8022(ha)) {
		/* 82xx: per-function register window at 0xbc000 plus a
		 * 2KB (devfn << 11) slot. */
		ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
				    ((uint8_t *)ha->nx_pcibase + 0xbc000 +
				     (ha->pdev->devfn << 11));
		ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
				    QLA82XX_CAM_RAM_DB2);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
				    ((uint8_t *)ha->nx_pcibase);
	}

	/* NOTE(review): db_base/db_len are computed but never used below —
	 * possibly left over from an earlier doorbell-mapping scheme. */
	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
	db_len = pci_resource_len(pdev, 4);

	return 0;
iospace_error_exit:
	return -ENOMEM;
}
4295
4296 /***
4297 * qla4xxx_iospace_config - maps registers
4298 * @ha: pointer to adapter structure
4299 *
4300 * This routines maps HBA's registers from the pci address space
4301 * into the kernel virtual address space for memory mapped i/o.
4302 **/
4303 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
4304 {
4305 unsigned long pio, pio_len, pio_flags;
4306 unsigned long mmio, mmio_len, mmio_flags;
4307
4308 pio = pci_resource_start(ha->pdev, 0);
4309 pio_len = pci_resource_len(ha->pdev, 0);
4310 pio_flags = pci_resource_flags(ha->pdev, 0);
4311 if (pio_flags & IORESOURCE_IO) {
4312 if (pio_len < MIN_IOBASE_LEN) {
4313 ql4_printk(KERN_WARNING, ha,
4314 "Invalid PCI I/O region size\n");
4315 pio = 0;
4316 }
4317 } else {
4318 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
4319 pio = 0;
4320 }
4321
4322 /* Use MMIO operations for all accesses. */
4323 mmio = pci_resource_start(ha->pdev, 1);
4324 mmio_len = pci_resource_len(ha->pdev, 1);
4325 mmio_flags = pci_resource_flags(ha->pdev, 1);
4326
4327 if (!(mmio_flags & IORESOURCE_MEM)) {
4328 ql4_printk(KERN_ERR, ha,
4329 "region #0 not an MMIO resource, aborting\n");
4330
4331 goto iospace_error_exit;
4332 }
4333
4334 if (mmio_len < MIN_IOBASE_LEN) {
4335 ql4_printk(KERN_ERR, ha,
4336 "Invalid PCI mem region size, aborting\n");
4337 goto iospace_error_exit;
4338 }
4339
4340 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
4341 ql4_printk(KERN_WARNING, ha,
4342 "Failed to reserve PIO/MMIO regions\n");
4343
4344 goto iospace_error_exit;
4345 }
4346
4347 ha->pio_address = pio;
4348 ha->pio_length = pio_len;
4349 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
4350 if (!ha->reg) {
4351 ql4_printk(KERN_ERR, ha,
4352 "cannot remap MMIO, aborting\n");
4353
4354 goto iospace_error_exit;
4355 }
4356
4357 return 0;
4358
4359 iospace_error_exit:
4360 return -ENOMEM;
4361 }
4362
/* Register-access and firmware operations for ISP40xx-series adapters. */
static struct isp_operations qla4xxx_isp_ops = {
	.iospace_config = qla4xxx_iospace_config,
	.pci_config = qla4xxx_pci_config,
	.disable_intrs = qla4xxx_disable_intrs,
	.enable_intrs = qla4xxx_enable_intrs,
	.start_firmware = qla4xxx_start_firmware,
	.intr_handler = qla4xxx_intr_handler,
	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
	.reset_chip = qla4xxx_soft_reset,
	.reset_firmware = qla4xxx_hw_reset,
	.queue_iocb = qla4xxx_queue_iocb,
	.complete_iocb = qla4xxx_complete_iocb,
	.rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
	.rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
	.get_sys_info = qla4xxx_get_sys_info,
	.queue_mailbox_command = qla4xxx_queue_mbox_cmd,
	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
};
4381
/* Register-access and firmware operations for ISP82xx adapters (adds IDC
 * locking and indirect register access over the 40xx set). */
static struct isp_operations qla4_82xx_isp_ops = {
	.iospace_config = qla4_8xxx_iospace_config,
	.pci_config = qla4_8xxx_pci_config,
	.disable_intrs = qla4_82xx_disable_intrs,
	.enable_intrs = qla4_82xx_enable_intrs,
	.start_firmware = qla4_8xxx_load_risc,
	.restart_firmware = qla4_82xx_try_start_fw,
	.intr_handler = qla4_82xx_intr_handler,
	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
	.need_reset = qla4_8xxx_need_reset,
	.reset_chip = qla4_82xx_isp_reset,
	.reset_firmware = qla4_8xxx_stop_firmware,
	.queue_iocb = qla4_82xx_queue_iocb,
	.complete_iocb = qla4_82xx_complete_iocb,
	.rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
	.rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
	.get_sys_info = qla4_8xxx_get_sys_info,
	.rd_reg_direct = qla4_82xx_rd_32,
	.wr_reg_direct = qla4_82xx_wr_32,
	.rd_reg_indirect = qla4_82xx_md_rd_32,
	.wr_reg_indirect = qla4_82xx_md_wr_32,
	.idc_lock = qla4_82xx_idc_lock,
	.idc_unlock = qla4_82xx_idc_unlock,
	.rom_lock_recovery = qla4_82xx_rom_lock_recovery,
	.queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
};
4409
/* Register-access and firmware operations for ISP83xx/84xx adapters; note
 * these reuse the 40xx shadow-register queue-index readers. */
static struct isp_operations qla4_83xx_isp_ops = {
	.iospace_config = qla4_8xxx_iospace_config,
	.pci_config = qla4_8xxx_pci_config,
	.disable_intrs = qla4_83xx_disable_intrs,
	.enable_intrs = qla4_83xx_enable_intrs,
	.start_firmware = qla4_8xxx_load_risc,
	.restart_firmware = qla4_83xx_start_firmware,
	.intr_handler = qla4_83xx_intr_handler,
	.interrupt_service_routine = qla4_83xx_interrupt_service_routine,
	.need_reset = qla4_8xxx_need_reset,
	.reset_chip = qla4_83xx_isp_reset,
	.reset_firmware = qla4_8xxx_stop_firmware,
	.queue_iocb = qla4_83xx_queue_iocb,
	.complete_iocb = qla4_83xx_complete_iocb,
	.rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
	.rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
	.get_sys_info = qla4_8xxx_get_sys_info,
	.rd_reg_direct = qla4_83xx_rd_reg,
	.wr_reg_direct = qla4_83xx_wr_reg,
	.rd_reg_indirect = qla4_83xx_rd_reg_indirect,
	.wr_reg_indirect = qla4_83xx_wr_reg_indirect,
	.idc_lock = qla4_83xx_drv_lock,
	.idc_unlock = qla4_83xx_drv_unlock,
	.rom_lock_recovery = qla4_83xx_rom_lock_recovery,
	.queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
	.process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
};
4437
/* Request-queue-out index from the DMA shadow registers (40xx/83xx). */
uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
}
4442
/* 82xx: request-queue-out index read directly from the MMIO register.
 * NOTE(review): le32_to_cpu() around readl() double-swaps on big-endian
 * hosts, since readl() already returns CPU byte order — confirm this
 * driver is only supported on little-endian platforms. */
uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
}
4447
/* Response-queue-in index from the DMA shadow registers (40xx/83xx). */
uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
}
4452
/* 82xx: response-queue-in index read directly from the MMIO register.
 * NOTE(review): same le32_to_cpu(readl()) endianness concern as
 * qla4_82xx_rd_shdw_req_q_out(). */
uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
}
4457
4458 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
4459 {
4460 struct scsi_qla_host *ha = data;
4461 char *str = buf;
4462 int rc;
4463
4464 switch (type) {
4465 case ISCSI_BOOT_ETH_FLAGS:
4466 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4467 break;
4468 case ISCSI_BOOT_ETH_INDEX:
4469 rc = sprintf(str, "0\n");
4470 break;
4471 case ISCSI_BOOT_ETH_MAC:
4472 rc = sysfs_format_mac(str, ha->my_mac,
4473 MAC_ADDR_LEN);
4474 break;
4475 default:
4476 rc = -ENOSYS;
4477 break;
4478 }
4479 return rc;
4480 }
4481
4482 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
4483 {
4484 int rc;
4485
4486 switch (type) {
4487 case ISCSI_BOOT_ETH_FLAGS:
4488 case ISCSI_BOOT_ETH_MAC:
4489 case ISCSI_BOOT_ETH_INDEX:
4490 rc = S_IRUGO;
4491 break;
4492 default:
4493 rc = 0;
4494 break;
4495 }
4496 return rc;
4497 }
4498
4499 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
4500 {
4501 struct scsi_qla_host *ha = data;
4502 char *str = buf;
4503 int rc;
4504
4505 switch (type) {
4506 case ISCSI_BOOT_INI_INITIATOR_NAME:
4507 rc = sprintf(str, "%s\n", ha->name_string);
4508 break;
4509 default:
4510 rc = -ENOSYS;
4511 break;
4512 }
4513 return rc;
4514 }
4515
4516 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
4517 {
4518 int rc;
4519
4520 switch (type) {
4521 case ISCSI_BOOT_INI_INITIATOR_NAME:
4522 rc = S_IRUGO;
4523 break;
4524 default:
4525 rc = 0;
4526 break;
4527 }
4528 return rc;
4529 }
4530
4531 static ssize_t
4532 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
4533 char *buf)
4534 {
4535 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4536 char *str = buf;
4537 int rc;
4538
4539 switch (type) {
4540 case ISCSI_BOOT_TGT_NAME:
4541 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
4542 break;
4543 case ISCSI_BOOT_TGT_IP_ADDR:
4544 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
4545 rc = sprintf(buf, "%pI4\n",
4546 &boot_conn->dest_ipaddr.ip_address);
4547 else
4548 rc = sprintf(str, "%pI6\n",
4549 &boot_conn->dest_ipaddr.ip_address);
4550 break;
4551 case ISCSI_BOOT_TGT_PORT:
4552 rc = sprintf(str, "%d\n", boot_conn->dest_port);
4553 break;
4554 case ISCSI_BOOT_TGT_CHAP_NAME:
4555 rc = sprintf(str, "%.*s\n",
4556 boot_conn->chap.target_chap_name_length,
4557 (char *)&boot_conn->chap.target_chap_name);
4558 break;
4559 case ISCSI_BOOT_TGT_CHAP_SECRET:
4560 rc = sprintf(str, "%.*s\n",
4561 boot_conn->chap.target_secret_length,
4562 (char *)&boot_conn->chap.target_secret);
4563 break;
4564 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4565 rc = sprintf(str, "%.*s\n",
4566 boot_conn->chap.intr_chap_name_length,
4567 (char *)&boot_conn->chap.intr_chap_name);
4568 break;
4569 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4570 rc = sprintf(str, "%.*s\n",
4571 boot_conn->chap.intr_secret_length,
4572 (char *)&boot_conn->chap.intr_secret);
4573 break;
4574 case ISCSI_BOOT_TGT_FLAGS:
4575 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4576 break;
4577 case ISCSI_BOOT_TGT_NIC_ASSOC:
4578 rc = sprintf(str, "0\n");
4579 break;
4580 default:
4581 rc = -ENOSYS;
4582 break;
4583 }
4584 return rc;
4585 }
4586
/* sysfs show handler for the primary boot target's attributes. */
static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
{
	struct scsi_qla_host *ha = data;
	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);

	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
4594
/* sysfs show handler for the secondary boot target's attributes. */
static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
{
	struct scsi_qla_host *ha = data;
	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);

	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
4602
4603 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
4604 {
4605 int rc;
4606
4607 switch (type) {
4608 case ISCSI_BOOT_TGT_NAME:
4609 case ISCSI_BOOT_TGT_IP_ADDR:
4610 case ISCSI_BOOT_TGT_PORT:
4611 case ISCSI_BOOT_TGT_CHAP_NAME:
4612 case ISCSI_BOOT_TGT_CHAP_SECRET:
4613 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4614 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4615 case ISCSI_BOOT_TGT_NIC_ASSOC:
4616 case ISCSI_BOOT_TGT_FLAGS:
4617 rc = S_IRUGO;
4618 break;
4619 default:
4620 rc = 0;
4621 break;
4622 }
4623 return rc;
4624 }
4625
/* iscsi_boot kobject release callback: drops the host reference taken by
 * scsi_host_get() when the boot kobject was created. */
static void qla4xxx_boot_release(void *data)
{
	struct scsi_qla_host *ha = data;

	scsi_host_put(ha->host);
}
4632
4633 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4634 {
4635 dma_addr_t buf_dma;
4636 uint32_t addr, pri_addr, sec_addr;
4637 uint32_t offset;
4638 uint16_t func_num;
4639 uint8_t val;
4640 uint8_t *buf = NULL;
4641 size_t size = 13 * sizeof(uint8_t);
4642 int ret = QLA_SUCCESS;
4643
4644 func_num = PCI_FUNC(ha->pdev->devfn);
4645
4646 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
4647 __func__, ha->pdev->device, func_num);
4648
4649 if (is_qla40XX(ha)) {
4650 if (func_num == 1) {
4651 addr = NVRAM_PORT0_BOOT_MODE;
4652 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
4653 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
4654 } else if (func_num == 3) {
4655 addr = NVRAM_PORT1_BOOT_MODE;
4656 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
4657 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
4658 } else {
4659 ret = QLA_ERROR;
4660 goto exit_boot_info;
4661 }
4662
4663 /* Check Boot Mode */
4664 val = rd_nvram_byte(ha, addr);
4665 if (!(val & 0x07)) {
4666 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
4667 "options : 0x%x\n", __func__, val));
4668 ret = QLA_ERROR;
4669 goto exit_boot_info;
4670 }
4671
4672 /* get primary valid target index */
4673 val = rd_nvram_byte(ha, pri_addr);
4674 if (val & BIT_7)
4675 ddb_index[0] = (val & 0x7f);
4676
4677 /* get secondary valid target index */
4678 val = rd_nvram_byte(ha, sec_addr);
4679 if (val & BIT_7)
4680 ddb_index[1] = (val & 0x7f);
4681
4682 } else if (is_qla80XX(ha)) {
4683 buf = dma_alloc_coherent(&ha->pdev->dev, size,
4684 &buf_dma, GFP_KERNEL);
4685 if (!buf) {
4686 DEBUG2(ql4_printk(KERN_ERR, ha,
4687 "%s: Unable to allocate dma buffer\n",
4688 __func__));
4689 ret = QLA_ERROR;
4690 goto exit_boot_info;
4691 }
4692
4693 if (ha->port_num == 0)
4694 offset = BOOT_PARAM_OFFSET_PORT0;
4695 else if (ha->port_num == 1)
4696 offset = BOOT_PARAM_OFFSET_PORT1;
4697 else {
4698 ret = QLA_ERROR;
4699 goto exit_boot_info_free;
4700 }
4701 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
4702 offset;
4703 if (qla4xxx_get_flash(ha, buf_dma, addr,
4704 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
4705 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
4706 " failed\n", ha->host_no, __func__));
4707 ret = QLA_ERROR;
4708 goto exit_boot_info_free;
4709 }
4710 /* Check Boot Mode */
4711 if (!(buf[1] & 0x07)) {
4712 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
4713 " : 0x%x\n", buf[1]));
4714 ret = QLA_ERROR;
4715 goto exit_boot_info_free;
4716 }
4717
4718 /* get primary valid target index */
4719 if (buf[2] & BIT_7)
4720 ddb_index[0] = buf[2] & 0x7f;
4721
4722 /* get secondary valid target index */
4723 if (buf[11] & BIT_7)
4724 ddb_index[1] = buf[11] & 0x7f;
4725 } else {
4726 ret = QLA_ERROR;
4727 goto exit_boot_info;
4728 }
4729
4730 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
4731 " target ID %d\n", __func__, ddb_index[0],
4732 ddb_index[1]));
4733
4734 exit_boot_info_free:
4735 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
4736 exit_boot_info:
4737 ha->pri_ddb_idx = ddb_index[0];
4738 ha->sec_ddb_idx = ddb_index[1];
4739 return ret;
4740 }
4741
4742 /**
4743 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4744 * @ha: pointer to adapter structure
4745 * @username: CHAP username to be returned
4746 * @password: CHAP password to be returned
4747 *
4748 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
4749 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
4750 * So from the CHAP cache find the first BIDI CHAP entry and set it
4751 * to the boot record in sysfs.
4752 **/
4753 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4754 char *password)
4755 {
4756 int i, ret = -EINVAL;
4757 int max_chap_entries = 0;
4758 struct ql4_chap_table *chap_table;
4759
4760 if (is_qla80XX(ha))
4761 max_chap_entries = (ha->hw.flt_chap_size / 2) /
4762 sizeof(struct ql4_chap_table);
4763 else
4764 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4765
4766 if (!ha->chap_list) {
4767 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4768 return ret;
4769 }
4770
4771 mutex_lock(&ha->chap_sem);
4772 for (i = 0; i < max_chap_entries; i++) {
4773 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4774 if (chap_table->cookie !=
4775 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4776 continue;
4777 }
4778
4779 if (chap_table->flags & BIT_7) /* local */
4780 continue;
4781
4782 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4783 continue;
4784
4785 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4786 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4787 ret = 0;
4788 break;
4789 }
4790 mutex_unlock(&ha->chap_sem);
4791
4792 return ret;
4793 }
4794
4795
/**
 * qla4xxx_get_boot_target - populate a boot session record from a flash DDB
 * @ha: pointer to adapter structure
 * @boot_sess: boot session record to fill in
 * @ddb_index: flash DDB index to read
 *
 * Reads the DDB at @ddb_index and copies the target name, IP address,
 * port, and CHAP / BIDI-CHAP credentials into @boot_sess for sysfs export.
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
				   struct ql4_boot_session_info *boot_sess,
				   uint16_t ddb_index)
{
	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
	struct dev_db_entry *fw_ddb_entry;
	dma_addr_t fw_ddb_entry_dma;
	uint16_t idx;
	uint16_t options;
	int ret = QLA_SUCCESS;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer.\n",
				  __func__));
		ret = QLA_ERROR;
		return ret;
	}

	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
				    fw_ddb_entry_dma, ddb_index)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
				  "index [%d]\n", __func__, ddb_index));
		ret = QLA_ERROR;
		goto exit_boot_target;
	}

	/* Update target name and IP from DDB */
	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
	       min(sizeof(boot_sess->target_name),
		   sizeof(fw_ddb_entry->iscsi_name)));

	options = le16_to_cpu(fw_ddb_entry->options);
	if (options & DDB_OPT_IPV6_DEVICE) {
		memcpy(&boot_conn->dest_ipaddr.ip_address,
		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
	} else {
		/* ip_type 0x1 marks the address as IPv4 for the show path */
		boot_conn->dest_ipaddr.ip_type = 0x1;
		memcpy(&boot_conn->dest_ipaddr.ip_address,
		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
	}

	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);

	/* update chap information */
	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);

	/* BIT_7 of iscsi_options: CHAP enabled — fetch credentials by index */
	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{

		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));

		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
				       target_chap_name,
				       (char *)&boot_conn->chap.target_secret,
				       idx);
		if (ret) {
			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
			ret = QLA_ERROR;
			goto exit_boot_target;
		}

		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
	}

	/* BIT_4 of iscsi_options: bidirectional CHAP enabled */
	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {

		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));

		ret = qla4xxx_get_bidi_chap(ha,
				    (char *)&boot_conn->chap.intr_chap_name,
				    (char *)&boot_conn->chap.intr_secret);

		if (ret) {
			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
			ret = QLA_ERROR;
			goto exit_boot_target;
		}

		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
	}

exit_boot_target:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}
4886
4887 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4888 {
4889 uint16_t ddb_index[2];
4890 int ret = QLA_ERROR;
4891 int rval;
4892
4893 memset(ddb_index, 0, sizeof(ddb_index));
4894 ddb_index[0] = 0xffff;
4895 ddb_index[1] = 0xffff;
4896 ret = get_fw_boot_info(ha, ddb_index);
4897 if (ret != QLA_SUCCESS) {
4898 DEBUG2(ql4_printk(KERN_INFO, ha,
4899 "%s: No boot target configured.\n", __func__));
4900 return ret;
4901 }
4902
4903 if (ql4xdisablesysfsboot)
4904 return QLA_SUCCESS;
4905
4906 if (ddb_index[0] == 0xffff)
4907 goto sec_target;
4908
4909 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4910 ddb_index[0]);
4911 if (rval != QLA_SUCCESS) {
4912 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4913 "configured\n", __func__));
4914 } else
4915 ret = QLA_SUCCESS;
4916
4917 sec_target:
4918 if (ddb_index[1] == 0xffff)
4919 goto exit_get_boot_info;
4920
4921 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4922 ddb_index[1]);
4923 if (rval != QLA_SUCCESS) {
4924 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4925 " configured\n", __func__));
4926 } else
4927 ret = QLA_SUCCESS;
4928
4929 exit_get_boot_info:
4930 return ret;
4931 }
4932
/*
 * qla4xxx_setup_boot_info - export boot target information through sysfs
 * @ha: pointer to adapter structure
 *
 * Reads the primary/secondary boot target info from the adapter and, unless
 * ql4xdisablesysfsboot is set, publishes it via the iscsi_boot sysfs kset:
 * one kobject per boot target plus initiator and ethernet entries.
 *
 * Each successfully created boot kobject owns a Scsi_Host reference taken
 * with scsi_host_get(); qla4xxx_boot_release() (passed as the release
 * callback) drops it when the kobject is destroyed.  When scsi_host_get()
 * itself fails no reference was taken, so jumping straight to kset_free
 * (which tears down all kobjects created so far, releasing their refs) is
 * correct.
 *
 * Returns QLA_SUCCESS on success.  NOTE(review): failure paths return either
 * QLA_ERROR or -ENOMEM; callers comparing against QLA_SUCCESS are fine, but
 * the mixed return convention should be confirmed before relying on the
 * exact error value.
 */
static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
{
	struct iscsi_boot_kobj *boot_kobj;

	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
		return QLA_ERROR;

	if (ql4xdisablesysfsboot) {
		/* Boot targets are still logged in by the driver; they are
		 * just not exported through the iscsi_boot sysfs tree. */
		ql4_printk(KERN_INFO, ha,
			   "%s: syfsboot disabled - driver will trigger login "
			   "and publish session for discovery .\n", __func__);
		return QLA_SUCCESS;
	}


	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
	if (!ha->boot_kset)
		goto kset_free;

	if (!scsi_host_get(ha->host))
		goto kset_free;
	/* Target 0: primary boot target */
	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
					     qla4xxx_show_boot_tgt_pri_info,
					     qla4xxx_tgt_get_attr_visibility,
					     qla4xxx_boot_release);
	if (!boot_kobj)
		goto put_host;

	if (!scsi_host_get(ha->host))
		goto kset_free;
	/* Target 1: secondary boot target */
	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
					     qla4xxx_show_boot_tgt_sec_info,
					     qla4xxx_tgt_get_attr_visibility,
					     qla4xxx_boot_release);
	if (!boot_kobj)
		goto put_host;

	if (!scsi_host_get(ha->host))
		goto kset_free;
	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
					       qla4xxx_show_boot_ini_info,
					       qla4xxx_ini_get_attr_visibility,
					       qla4xxx_boot_release);
	if (!boot_kobj)
		goto put_host;

	if (!scsi_host_get(ha->host))
		goto kset_free;
	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
					       qla4xxx_show_boot_eth_info,
					       qla4xxx_eth_get_attr_visibility,
					       qla4xxx_boot_release);
	if (!boot_kobj)
		goto put_host;

	return QLA_SUCCESS;

put_host:
	/* Drop the reference taken for the kobject that failed to create;
	 * references held by already-created kobjects are released by their
	 * release callbacks during kset destruction below. */
	scsi_host_put(ha->host);
kset_free:
	iscsi_boot_destroy_kset(ha->boot_kset);
	return -ENOMEM;
}
4996
4997
4998 /**
4999 * qla4xxx_create chap_list - Create CHAP list from FLASH
5000 * @ha: pointer to adapter structure
5001 *
5002 * Read flash and make a list of CHAP entries, during login when a CHAP entry
5003 * is received, it will be checked in this list. If entry exist then the CHAP
5004 * entry index is set in the DDB. If CHAP entry does not exist in this list
5005 * then a new entry is added in FLASH in CHAP table and the index obtained is
5006 * used in the DDB.
5007 **/
5008 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
5009 {
5010 int rval = 0;
5011 uint8_t *chap_flash_data = NULL;
5012 uint32_t offset;
5013 dma_addr_t chap_dma;
5014 uint32_t chap_size = 0;
5015
5016 if (is_qla40XX(ha))
5017 chap_size = MAX_CHAP_ENTRIES_40XX *
5018 sizeof(struct ql4_chap_table);
5019 else /* Single region contains CHAP info for both
5020 * ports which is divided into half for each port.
5021 */
5022 chap_size = ha->hw.flt_chap_size / 2;
5023
5024 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
5025 &chap_dma, GFP_KERNEL);
5026 if (!chap_flash_data) {
5027 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
5028 return;
5029 }
5030 if (is_qla40XX(ha))
5031 offset = FLASH_CHAP_OFFSET;
5032 else {
5033 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
5034 if (ha->port_num == 1)
5035 offset += chap_size;
5036 }
5037
5038 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
5039 if (rval != QLA_SUCCESS)
5040 goto exit_chap_list;
5041
5042 if (ha->chap_list == NULL)
5043 ha->chap_list = vmalloc(chap_size);
5044 if (ha->chap_list == NULL) {
5045 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
5046 goto exit_chap_list;
5047 }
5048
5049 memcpy(ha->chap_list, chap_flash_data, chap_size);
5050
5051 exit_chap_list:
5052 dma_free_coherent(&ha->pdev->dev, chap_size,
5053 chap_flash_data, chap_dma);
5054 }
5055
/*
 * qla4xxx_get_param_ddb - fill a comparison tuple from a live session
 * @ddb_entry: driver DDB whose session/connection parameters are read
 * @tddb: tuple to fill (tpgt, persistent port, target name, IP address)
 *
 * Snapshots the parameters that qla4xxx_compare_tuple_ddb() uses for
 * duplicate-target detection.
 *
 * NOTE(review): strncpy() does not NUL-terminate when the source fills the
 * whole buffer; this assumes ql4_tuple_ddb's fields are sized (or zeroed)
 * so the later strcmp() sees terminated strings - confirm against the
 * struct definition.
 */
static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
				  struct ql4_tuple_ddb *tddb)
{
	struct scsi_qla_host *ha;
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct iscsi_conn *conn;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	ha = ddb_entry->ha;
	cls_sess = ddb_entry->sess;
	sess = cls_sess->dd_data;
	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;

	tddb->tpgt = sess->tpgt;
	tddb->port = conn->persistent_port;
	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
}
5077
5078 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
5079 struct ql4_tuple_ddb *tddb,
5080 uint8_t *flash_isid)
5081 {
5082 uint16_t options = 0;
5083
5084 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
5085 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
5086 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
5087
5088 options = le16_to_cpu(fw_ddb_entry->options);
5089 if (options & DDB_OPT_IPV6_DEVICE)
5090 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
5091 else
5092 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
5093
5094 tddb->port = le16_to_cpu(fw_ddb_entry->port);
5095
5096 if (flash_isid == NULL)
5097 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
5098 sizeof(tddb->isid));
5099 else
5100 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
5101 }
5102
5103 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
5104 struct ql4_tuple_ddb *old_tddb,
5105 struct ql4_tuple_ddb *new_tddb,
5106 uint8_t is_isid_compare)
5107 {
5108 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5109 return QLA_ERROR;
5110
5111 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
5112 return QLA_ERROR;
5113
5114 if (old_tddb->port != new_tddb->port)
5115 return QLA_ERROR;
5116
5117 /* For multi sessions, driver generates the ISID, so do not compare
5118 * ISID in reset path since it would be a comparison between the
5119 * driver generated ISID and firmware generated ISID. This could
5120 * lead to adding duplicated DDBs in the list as driver generated
5121 * ISID would not match firmware generated ISID.
5122 */
5123 if (is_isid_compare) {
5124 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
5125 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
5126 __func__, old_tddb->isid[5], old_tddb->isid[4],
5127 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
5128 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
5129 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
5130 new_tddb->isid[0]));
5131
5132 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5133 sizeof(old_tddb->isid)))
5134 return QLA_ERROR;
5135 }
5136
5137 DEBUG2(ql4_printk(KERN_INFO, ha,
5138 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
5139 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
5140 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
5141 new_tddb->ip_addr, new_tddb->iscsi_name));
5142
5143 return QLA_SUCCESS;
5144 }
5145
5146 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
5147 struct dev_db_entry *fw_ddb_entry,
5148 uint32_t *index)
5149 {
5150 struct ddb_entry *ddb_entry;
5151 struct ql4_tuple_ddb *fw_tddb = NULL;
5152 struct ql4_tuple_ddb *tmp_tddb = NULL;
5153 int idx;
5154 int ret = QLA_ERROR;
5155
5156 fw_tddb = vzalloc(sizeof(*fw_tddb));
5157 if (!fw_tddb) {
5158 DEBUG2(ql4_printk(KERN_WARNING, ha,
5159 "Memory Allocation failed.\n"));
5160 ret = QLA_SUCCESS;
5161 goto exit_check;
5162 }
5163
5164 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5165 if (!tmp_tddb) {
5166 DEBUG2(ql4_printk(KERN_WARNING, ha,
5167 "Memory Allocation failed.\n"));
5168 ret = QLA_SUCCESS;
5169 goto exit_check;
5170 }
5171
5172 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5173
5174 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5175 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5176 if (ddb_entry == NULL)
5177 continue;
5178
5179 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
5180 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
5181 ret = QLA_SUCCESS; /* found */
5182 if (index != NULL)
5183 *index = idx;
5184 goto exit_check;
5185 }
5186 }
5187
5188 exit_check:
5189 if (fw_tddb)
5190 vfree(fw_tddb);
5191 if (tmp_tddb)
5192 vfree(tmp_tddb);
5193 return ret;
5194 }
5195
5196 /**
5197 * qla4xxx_check_existing_isid - check if target with same isid exist
5198 * in target list
5199 * @list_nt: list of target
5200 * @isid: isid to check
5201 *
5202 * This routine return QLA_SUCCESS if target with same isid exist
5203 **/
5204 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
5205 {
5206 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
5207 struct dev_db_entry *fw_ddb_entry;
5208
5209 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5210 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
5211
5212 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
5213 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
5214 return QLA_SUCCESS;
5215 }
5216 }
5217 return QLA_ERROR;
5218 }
5219
5220 /**
5221 * qla4xxx_update_isid - compare ddbs and updated isid
5222 * @ha: Pointer to host adapter structure.
5223 * @list_nt: list of nt target
5224 * @fw_ddb_entry: firmware ddb entry
5225 *
5226 * This routine update isid if ddbs have same iqn, same isid and
5227 * different IP addr.
5228 * Return QLA_SUCCESS if isid is updated.
5229 **/
5230 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
5231 struct list_head *list_nt,
5232 struct dev_db_entry *fw_ddb_entry)
5233 {
5234 uint8_t base_value, i;
5235
5236 base_value = fw_ddb_entry->isid[1] & 0x1f;
5237 for (i = 0; i < 8; i++) {
5238 fw_ddb_entry->isid[1] = (base_value | (i << 5));
5239 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5240 break;
5241 }
5242
5243 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5244 return QLA_ERROR;
5245
5246 return QLA_SUCCESS;
5247 }
5248
5249 /**
5250 * qla4xxx_should_update_isid - check if isid need to update
5251 * @ha: Pointer to host adapter structure.
5252 * @old_tddb: ddb tuple
5253 * @new_tddb: ddb tuple
5254 *
5255 * Return QLA_SUCCESS if different IP, different PORT, same iqn,
5256 * same isid
5257 **/
5258 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
5259 struct ql4_tuple_ddb *old_tddb,
5260 struct ql4_tuple_ddb *new_tddb)
5261 {
5262 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
5263 /* Same ip */
5264 if (old_tddb->port == new_tddb->port)
5265 return QLA_ERROR;
5266 }
5267
5268 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5269 /* different iqn */
5270 return QLA_ERROR;
5271
5272 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5273 sizeof(old_tddb->isid)))
5274 /* different isid */
5275 return QLA_ERROR;
5276
5277 return QLA_SUCCESS;
5278 }
5279
5280 /**
5281 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
5282 * @ha: Pointer to host adapter structure.
5283 * @list_nt: list of nt target.
5284 * @fw_ddb_entry: firmware ddb entry.
5285 *
5286 * This routine check if fw_ddb_entry already exists in list_nt to avoid
5287 * duplicate ddb in list_nt.
5288 * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
5289 * Note: This function also update isid of DDB if required.
5290 **/
5291
5292 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
5293 struct list_head *list_nt,
5294 struct dev_db_entry *fw_ddb_entry)
5295 {
5296 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
5297 struct ql4_tuple_ddb *fw_tddb = NULL;
5298 struct ql4_tuple_ddb *tmp_tddb = NULL;
5299 int rval, ret = QLA_ERROR;
5300
5301 fw_tddb = vzalloc(sizeof(*fw_tddb));
5302 if (!fw_tddb) {
5303 DEBUG2(ql4_printk(KERN_WARNING, ha,
5304 "Memory Allocation failed.\n"));
5305 ret = QLA_SUCCESS;
5306 goto exit_check;
5307 }
5308
5309 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5310 if (!tmp_tddb) {
5311 DEBUG2(ql4_printk(KERN_WARNING, ha,
5312 "Memory Allocation failed.\n"));
5313 ret = QLA_SUCCESS;
5314 goto exit_check;
5315 }
5316
5317 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5318
5319 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5320 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
5321 nt_ddb_idx->flash_isid);
5322 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
5323 /* found duplicate ddb */
5324 if (ret == QLA_SUCCESS)
5325 goto exit_check;
5326 }
5327
5328 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5329 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
5330
5331 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
5332 if (ret == QLA_SUCCESS) {
5333 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
5334 if (rval == QLA_SUCCESS)
5335 ret = QLA_ERROR;
5336 else
5337 ret = QLA_SUCCESS;
5338
5339 goto exit_check;
5340 }
5341 }
5342
5343 exit_check:
5344 if (fw_tddb)
5345 vfree(fw_tddb);
5346 if (tmp_tddb)
5347 vfree(tmp_tddb);
5348 return ret;
5349 }
5350
5351 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
5352 {
5353 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5354
5355 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5356 list_del_init(&ddb_idx->list);
5357 vfree(ddb_idx);
5358 }
5359 }
5360
5361 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
5362 struct dev_db_entry *fw_ddb_entry)
5363 {
5364 struct iscsi_endpoint *ep;
5365 struct sockaddr_in *addr;
5366 struct sockaddr_in6 *addr6;
5367 struct sockaddr *t_addr;
5368 struct sockaddr_storage *dst_addr;
5369 char *ip;
5370
5371 /* TODO: need to destroy on unload iscsi_endpoint*/
5372 dst_addr = vmalloc(sizeof(*dst_addr));
5373 if (!dst_addr)
5374 return NULL;
5375
5376 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
5377 t_addr = (struct sockaddr *)dst_addr;
5378 t_addr->sa_family = AF_INET6;
5379 addr6 = (struct sockaddr_in6 *)dst_addr;
5380 ip = (char *)&addr6->sin6_addr;
5381 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
5382 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
5383
5384 } else {
5385 t_addr = (struct sockaddr *)dst_addr;
5386 t_addr->sa_family = AF_INET;
5387 addr = (struct sockaddr_in *)dst_addr;
5388 ip = (char *)&addr->sin_addr;
5389 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
5390 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
5391 }
5392
5393 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
5394 vfree(dst_addr);
5395 return ep;
5396 }
5397
5398 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
5399 {
5400 if (ql4xdisablesysfsboot)
5401 return QLA_SUCCESS;
5402 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
5403 return QLA_ERROR;
5404 return QLA_SUCCESS;
5405 }
5406
/*
 * qla4xxx_setup_flash_ddb_entry - initialize a driver DDB built from flash
 * @ha: pointer to adapter structure
 * @ddb_entry: driver DDB to initialize (fw_ddb_entry already copied in)
 * @idx: flash DDB index this entry was built from
 *
 * Marks the entry as a flash DDB with no firmware index assigned yet,
 * installs the flash-DDB callbacks, resets the relogin counters, and
 * derives the relogin timeout from the flash-provided default (clamped to
 * the open interval (LOGIN_TOV, LOGIN_TOV * 10), otherwise LOGIN_TOV).
 */
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
					  struct ddb_entry *ddb_entry,
					  uint16_t idx)
{
	uint16_t def_timeout;

	ddb_entry->ddb_type = FLASH_DDB;
	ddb_entry->fw_ddb_index = INVALID_ENTRY;
	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
	ddb_entry->ha = ha;
	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
	ddb_entry->chap_tbl_idx = INVALID_ENTRY;

	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
	atomic_set(&ddb_entry->relogin_timer, 0);
	atomic_set(&ddb_entry->relogin_retry_count, 0);
	/* Use the flash default_timeout only when it is in a sane range. */
	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
	ddb_entry->default_relogin_timeout =
		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
		def_timeout : LOGIN_TOV;
	ddb_entry->default_time2wait =
		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);

	/* When sysfs boot export is disabled the driver logs in the boot
	 * targets itself; flag them so they are treated as boot targets. */
	if (ql4xdisablesysfsboot &&
	    (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
		set_bit(DF_BOOT_TGT, &ddb_entry->flags);
}
5435
/*
 * qla4xxx_wait_for_ip_configuration - wait for IP interfaces to settle
 * @ha: pointer to adapter structure
 *
 * Polls the firmware IP state (one mailbox query per interface) once per
 * second for up to IP_CONFIG_TOV seconds, until every one of the four IP
 * interfaces has either reached a terminal address state or failed the
 * query.  An interface is retired from polling by writing -1 into its
 * ip_idx[] slot.
 */
static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
{
	uint32_t idx = 0;
	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
	uint32_t sts[MBOX_REG_COUNT];
	uint32_t ip_state;
	unsigned long wtime;
	int ret;

	wtime = jiffies + (HZ * IP_CONFIG_TOV);
	do {
		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
			/* -1 marks an already-retired interface; the
			 * comparison is well-defined (-1 converts to
			 * UINT_MAX for the unsigned compare). */
			if (ip_idx[idx] == -1)
				continue;

			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);

			if (ret == QLA_ERROR) {
				/* Query failed - stop polling this one. */
				ip_idx[idx] = -1;
				continue;
			}

			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;

			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Waiting for IP state for idx = %d, state = 0x%x\n",
					  ip_idx[idx], ip_state));
			/* Terminal states: no further change expected. */
			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
			    ip_state == IP_ADDRSTATE_INVALID ||
			    ip_state == IP_ADDRSTATE_PREFERRED ||
			    ip_state == IP_ADDRSTATE_DEPRICATED ||
			    ip_state == IP_ADDRSTATE_DISABLING)
				ip_idx[idx] = -1;
		}

		/* Break if all IP states checked */
		if ((ip_idx[0] == -1) &&
		    (ip_idx[1] == -1) &&
		    (ip_idx[2] == -1) &&
		    (ip_idx[3] == -1))
			break;
		schedule_timeout_uninterruptible(HZ);
	} while (time_after(wtime, jiffies));
}
5480
5481 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
5482 struct dev_db_entry *flash_ddb_entry)
5483 {
5484 uint16_t options = 0;
5485 size_t ip_len = IP_ADDR_LEN;
5486
5487 options = le16_to_cpu(fw_ddb_entry->options);
5488 if (options & DDB_OPT_IPV6_DEVICE)
5489 ip_len = IPv6_ADDR_LEN;
5490
5491 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
5492 return QLA_ERROR;
5493
5494 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
5495 sizeof(fw_ddb_entry->isid)))
5496 return QLA_ERROR;
5497
5498 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
5499 sizeof(fw_ddb_entry->port)))
5500 return QLA_ERROR;
5501
5502 return QLA_SUCCESS;
5503 }
5504
/*
 * qla4xxx_find_flash_st_idx - locate the flash index of a firmware ST entry
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: firmware sendtarget DDB to locate
 * @fw_idx: the entry's firmware index (tried first as a fast path)
 * @flash_index: out-parameter receiving the matching flash index
 *
 * Reads flash DDBs and compares IP/ISID/port against @fw_ddb_entry.  The
 * firmware index is tried first since fw and flash indices usually agree;
 * otherwise the whole flash table is scanned.
 *
 * Returns QLA_SUCCESS with *flash_index set on a match, QLA_ERROR otherwise.
 */
static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
				     struct dev_db_entry *fw_ddb_entry,
				     uint32_t fw_idx, uint32_t *flash_index)
{
	struct dev_db_entry *flash_ddb_entry;
	dma_addr_t flash_ddb_entry_dma;
	uint32_t idx = 0;
	int max_ddbs;
	int ret = QLA_ERROR, status;

	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;

	flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
					 &flash_ddb_entry_dma);
	/* NOTE(review): the message below also fires when the caller passed
	 * fw_ddb_entry == NULL, which is not an OOM condition. */
	if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
		ql4_printk(KERN_ERR, ha, "Out of memory\n");
		goto exit_find_st_idx;
	}

	/* Fast path: the flash index often equals the firmware index. */
	status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
					  flash_ddb_entry_dma, fw_idx);
	if (status == QLA_SUCCESS) {
		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
		if (status == QLA_SUCCESS) {
			*flash_index = fw_idx;
			ret = QLA_SUCCESS;
			goto exit_find_st_idx;
		}
	}

	/* Slow path: scan the whole flash DDB table. */
	for (idx = 0; idx < max_ddbs; idx++) {
		status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
						  flash_ddb_entry_dma, idx);
		if (status == QLA_ERROR)
			continue;

		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
		if (status == QLA_SUCCESS) {
			*flash_index = idx;
			ret = QLA_SUCCESS;
			goto exit_find_st_idx;
		}
	}

	/* Only reached without a match; on success we jumped out early. */
	if (idx == max_ddbs)
		ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
			   fw_idx);

exit_find_st_idx:
	if (flash_ddb_entry)
		dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
			      flash_ddb_entry_dma);

	return ret;
}
5561
/*
 * qla4xxx_build_st_list - collect sendtarget (ST) entries from firmware
 * @ha: pointer to adapter structure
 * @list_st: list to which discovered ST entries are appended
 *
 * Walks the firmware DDB table.  Entries with an empty iscsi_name are
 * sendtarget entries; for each one a vzalloc'd qla_ddb_index is appended to
 * @list_st, recording both the firmware index and the corresponding flash
 * index (falling back to the firmware index when no flash match is found).
 * The caller owns @list_st and frees the nodes (qla4xxx_free_ddb_list()).
 */
static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
				  struct list_head *list_st)
{
	struct qla_ddb_index *st_ddb_idx;
	int max_ddbs;
	int fw_idx_size;
	struct dev_db_entry *fw_ddb_entry;
	dma_addr_t fw_ddb_dma;
	int ret;
	uint32_t idx = 0, next_idx = 0;
	uint32_t state = 0, conn_err = 0;
	uint32_t flash_index = -1;
	uint16_t conn_id = 0;

	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
				      &fw_ddb_dma);
	if (fw_ddb_entry == NULL) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
		goto exit_st_list;
	}

	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;
	fw_idx_size = sizeof(struct qla_ddb_index);

	/* The firmware returns the next valid index, so the walk skips
	 * unused slots via next_idx. */
	for (idx = 0; idx < max_ddbs; idx = next_idx) {
		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
					      NULL, &next_idx, &state,
					      &conn_err, NULL, &conn_id);
		if (ret == QLA_ERROR)
			break;

		/* Ignore DDB if invalid state (unassigned) */
		if (state == DDB_DS_UNASSIGNED)
			goto continue_next_st;

		/* An empty target name identifies a sendtarget (ST) entry;
		 * named entries are normal targets (NT) and are skipped. */
		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
			goto continue_next_st;

		st_ddb_idx = vzalloc(fw_idx_size);
		if (!st_ddb_idx)
			break;

		ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
						&flash_index);
		if (ret == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha,
				   "No flash entry for ST at idx [%d]\n", idx);
			/* Fall back to the firmware index. */
			st_ddb_idx->flash_ddb_idx = idx;
		} else {
			ql4_printk(KERN_INFO, ha,
				   "ST at idx [%d] is stored at flash [%d]\n",
				   idx, flash_index);
			st_ddb_idx->flash_ddb_idx = flash_index;
		}

		st_ddb_idx->fw_ddb_idx = idx;

		list_add_tail(&st_ddb_idx->list, list_st);
continue_next_st:
		/* next_idx == 0 means the firmware reported no further
		 * valid entries. */
		if (next_idx == 0)
			break;
	}

exit_st_list:
	if (fw_ddb_entry)
		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
5631
5632 /**
5633 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
5634 * @ha: pointer to adapter structure
5635 * @list_ddb: List from which failed ddb to be removed
5636 *
5637 * Iterate over the list of DDBs and find and remove DDBs that are either in
5638 * no connection active state or failed state
5639 **/
5640 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
5641 struct list_head *list_ddb)
5642 {
5643 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5644 uint32_t next_idx = 0;
5645 uint32_t state = 0, conn_err = 0;
5646 int ret;
5647
5648 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5649 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
5650 NULL, 0, NULL, &next_idx, &state,
5651 &conn_err, NULL, NULL);
5652 if (ret == QLA_ERROR)
5653 continue;
5654
5655 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5656 state == DDB_DS_SESSION_FAILED) {
5657 list_del_init(&ddb_idx->list);
5658 vfree(ddb_idx);
5659 }
5660 }
5661 }
5662
5663 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
5664 struct ddb_entry *ddb_entry,
5665 struct dev_db_entry *fw_ddb_entry)
5666 {
5667 struct iscsi_cls_session *cls_sess;
5668 struct iscsi_session *sess;
5669 uint32_t max_ddbs = 0;
5670 uint16_t ddb_link = -1;
5671
5672 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5673 MAX_DEV_DB_ENTRIES;
5674
5675 cls_sess = ddb_entry->sess;
5676 sess = cls_sess->dd_data;
5677
5678 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5679 if (ddb_link < max_ddbs)
5680 sess->discovery_parent_idx = ddb_link;
5681 else
5682 sess->discovery_parent_idx = DDB_NO_LINK;
5683 }
5684
/*
 * qla4xxx_sess_conn_setup - create a session/connection pair for a fw DDB
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: firmware DDB describing the target
 * @is_reset: INIT_ADAPTER or RESET_ADAPTER (reset triggers immediate relogin)
 * @idx: flash DDB index used to set up the driver DDB
 *
 * Allocates an iSCSI class session and connection, copies the firmware DDB
 * into the driver DDB, attaches an endpoint for sysfs attribute display,
 * and copies the session/connection parameters.  On RESET_ADAPTER the
 * session is blocked and flagged for immediate relogin.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 *
 * NOTE(review): on the error paths after iscsi_session_setup() succeeds,
 * the session/connection are not torn down here - presumably a later
 * driver-wide teardown path reclaims them; confirm before relying on this.
 */
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
				   struct dev_db_entry *fw_ddb_entry,
				   int is_reset, uint16_t idx)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_session *sess;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_endpoint *ep;
	uint16_t cmds_max = 32;
	uint16_t conn_id = 0;
	uint32_t initial_cmdsn = 0;
	int ret = QLA_SUCCESS;

	struct ddb_entry *ddb_entry = NULL;

	/* Create session object, with INVALID_ENTRY,
	 * the targer_id would get set when we issue the login
	 */
	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
				       cmds_max, sizeof(struct ddb_entry),
				       sizeof(struct ql4_task_data),
				       initial_cmdsn, INVALID_ENTRY);
	if (!cls_sess) {
		ret = QLA_ERROR;
		goto exit_setup;
	}

	/*
	 * iscsi_session_setup() took a reference on this module's owner;
	 * the driver manages these sessions itself, so drop that reference
	 * here to keep the module unloadable.
	 **/
	module_put(qla4xxx_iscsi_transport.owner);
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->sess = cls_sess;

	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
	       sizeof(struct dev_db_entry));

	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);

	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);

	if (!cls_conn) {
		ret = QLA_ERROR;
		goto exit_setup;
	}

	ddb_entry->conn = cls_conn;

	/* Setup ep, for displaying attributes in sysfs */
	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
	if (ep) {
		ep->conn = cls_conn;
		cls_conn->ep = ep;
	} else {
		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
		ret = QLA_ERROR;
		goto exit_setup;
	}

	/* Update sess/conn params */
	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
	qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);

	if (is_reset == RESET_ADAPTER) {
		iscsi_block_session(cls_sess);
		/* Use the relogin path to discover new devices
		 * by short-circuting the logic of setting
		 * timer to relogin - instead set the flags
		 * to initiate login right away.
		 */
		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
		set_bit(DF_RELOGIN, &ddb_entry->flags);
	}

exit_setup:
	return ret;
}
5765
5766 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
5767 struct list_head *list_ddb,
5768 struct dev_db_entry *fw_ddb_entry)
5769 {
5770 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5771 uint16_t ddb_link;
5772
5773 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5774
5775 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5776 if (ddb_idx->fw_ddb_idx == ddb_link) {
5777 DEBUG2(ql4_printk(KERN_INFO, ha,
5778 "Updating NT parent idx from [%d] to [%d]\n",
5779 ddb_link, ddb_idx->flash_ddb_idx));
5780 fw_ddb_entry->ddb_link =
5781 cpu_to_le16(ddb_idx->flash_ddb_idx);
5782 return;
5783 }
5784 }
5785 }
5786
/*
 * qla4xxx_build_nt_list - collect normal-target (NT) firmware DDBs
 * @ha: pointer to adapter structure
 * @list_nt: on INIT_ADAPTER, receives a qla_ddb_index node per NT added
 * @list_st: ST list used to remap each NT's parent ddb_link to flash index
 * @is_reset: INIT_ADAPTER or RESET_ADAPTER
 *
 * Walks the firmware DDB table and sets up a session/connection for every
 * eligible NT entry (non-empty iscsi_name, not a reserved boot index).
 * On INIT_ADAPTER only inactive/failed entries are considered and duplicate
 * detection (including ISID regeneration) is performed against @list_nt.
 * On RESET_ADAPTER entries whose session already exists only get their
 * discovery parent refreshed.
 */
static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
				  struct list_head *list_nt,
				  struct list_head *list_st,
				  int is_reset)
{
	struct dev_db_entry *fw_ddb_entry;
	struct ddb_entry *ddb_entry = NULL;
	dma_addr_t fw_ddb_dma;
	int max_ddbs;
	int fw_idx_size;
	int ret;
	uint32_t idx = 0, next_idx = 0;
	uint32_t state = 0, conn_err = 0;
	uint32_t ddb_idx = -1;
	uint16_t conn_id = 0;
	uint16_t ddb_link = -1;
	struct qla_ddb_index *nt_ddb_idx;

	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
				      &fw_ddb_dma);
	if (fw_ddb_entry == NULL) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
		goto exit_nt_list;
	}
	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;
	fw_idx_size = sizeof(struct qla_ddb_index);

	for (idx = 0; idx < max_ddbs; idx = next_idx) {
		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
					      NULL, &next_idx, &state,
					      &conn_err, NULL, &conn_id);
		if (ret == QLA_ERROR)
			break;

		/* Skip indices reserved for sysfs-exported boot targets. */
		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
			goto continue_next_nt;

		/* Check if NT, then add to list it */
		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
			goto continue_next_nt;

		/* Remap the parent ddb_link from fw index to flash index. */
		ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
		if (ddb_link < max_ddbs)
			qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);

		/* During initial bring-up only pick up entries that are not
		 * already active (inactive or failed). */
		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
		    state == DDB_DS_SESSION_FAILED) &&
		    (is_reset == INIT_ADAPTER))
			goto continue_next_nt;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Adding DDB to session = 0x%x\n", idx));

		if (is_reset == INIT_ADAPTER) {
			/* NOTE(review): vmalloc (not vzalloc) - only the
			 * fields assigned below are valid in the node. */
			nt_ddb_idx = vmalloc(fw_idx_size);
			if (!nt_ddb_idx)
				break;

			nt_ddb_idx->fw_ddb_idx = idx;

			/* Copy original isid as it may get updated in function
			 * qla4xxx_update_isid(). We need original isid in
			 * function qla4xxx_compare_tuple_ddb to find duplicate
			 * target */
			memcpy(&nt_ddb_idx->flash_isid[0],
			       &fw_ddb_entry->isid[0],
			       sizeof(nt_ddb_idx->flash_isid));

			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
							  fw_ddb_entry);
			if (ret == QLA_SUCCESS) {
				/* free nt_ddb_idx and do not add to list_nt */
				vfree(nt_ddb_idx);
				goto continue_next_nt;
			}

			/* Copy updated isid */
			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
			       sizeof(struct dev_db_entry));

			list_add_tail(&nt_ddb_idx->list, list_nt);
		} else if (is_reset == RESET_ADAPTER) {
			/* After a reset, reuse the existing session if one
			 * already matches this DDB. */
			ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
							&ddb_idx);
			if (ret == QLA_SUCCESS) {
				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
								       ddb_idx);
				if (ddb_entry != NULL)
					qla4xxx_update_sess_disc_idx(ha,
								     ddb_entry,
								  fw_ddb_entry);
				goto continue_next_nt;
			}
		}

		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
		if (ret == QLA_ERROR)
			goto exit_nt_list;

continue_next_nt:
		/* next_idx == 0: firmware reports no further valid entries. */
		if (next_idx == 0)
			break;
	}

exit_nt_list:
	if (fw_ddb_entry)
		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
5896
/*
 * qla4xxx_build_new_nt_list - set up sessions for newly discovered NT DDBs
 * @ha: pointer to adapter structure
 * @list_nt: receives a qla_ddb_index node for each new NT entry
 * @target_id: when a valid index, written into each entry's ddb_link as the
 *             discovery parent
 *
 * Walks the firmware DDB table looking for NT entries (non-empty
 * iscsi_name) in DDB_DS_NO_CONNECTION_ACTIVE state that do not already
 * have a session, and creates a session/connection for each.  The caller
 * owns @list_nt and frees the nodes.
 */
static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
				      struct list_head *list_nt,
				      uint16_t target_id)
{
	struct dev_db_entry *fw_ddb_entry;
	dma_addr_t fw_ddb_dma;
	int max_ddbs;
	int fw_idx_size;
	int ret;
	uint32_t idx = 0, next_idx = 0;
	uint32_t state = 0, conn_err = 0;
	uint16_t conn_id = 0;
	struct qla_ddb_index *nt_ddb_idx;

	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
				      &fw_ddb_dma);
	if (fw_ddb_entry == NULL) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
		goto exit_new_nt_list;
	}
	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;
	fw_idx_size = sizeof(struct qla_ddb_index);

	for (idx = 0; idx < max_ddbs; idx = next_idx) {
		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
					      NULL, &next_idx, &state,
					      &conn_err, NULL, &conn_id);
		if (ret == QLA_ERROR)
			break;

		/* Check if NT, then add it to list */
		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
			goto continue_next_new_nt;

		/* Only entries without an active connection are "new". */
		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
			goto continue_next_new_nt;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Adding DDB to session = 0x%x\n", idx));

		/* NOTE(review): vmalloc (not vzalloc) - only fw_ddb_idx is
		 * assigned below; other node fields remain uninitialized. */
		nt_ddb_idx = vmalloc(fw_idx_size);
		if (!nt_ddb_idx)
			break;

		nt_ddb_idx->fw_ddb_idx = idx;

		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
		if (ret == QLA_SUCCESS) {
			/* free nt_ddb_idx and do not add to list_nt */
			vfree(nt_ddb_idx);
			goto continue_next_new_nt;
		}

		/* Record the discovery parent when one was supplied. */
		if (target_id < max_ddbs)
			fw_ddb_entry->ddb_link = cpu_to_le16(target_id);

		list_add_tail(&nt_ddb_idx->list, list_nt);

		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
					      idx);
		if (ret == QLA_ERROR)
			goto exit_new_nt_list;

continue_next_new_nt:
		/* next_idx == 0: firmware reports no further valid entries. */
		if (next_idx == 0)
			break;
	}

exit_new_nt_list:
	if (fw_ddb_entry)
		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
5970
5971 /**
5972 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
5973 * @dev: dev associated with the sysfs entry
5974 * @data: pointer to flashnode session object
5975 *
5976 * Returns:
5977 * 1: if flashnode entry is non-persistent
5978 * 0: if flashnode entry is persistent
5979 **/
5980 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5981 {
5982 struct iscsi_bus_flash_session *fnode_sess;
5983
5984 if (!iscsi_flashnode_bus_match(dev, NULL))
5985 return 0;
5986
5987 fnode_sess = iscsi_dev_to_flash_session(dev);
5988
5989 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
5990 }
5991
/**
 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
 * @ha: pointer to host
 * @fw_ddb_entry: flash ddb data
 * @idx: target index
 * @user: if set then this call is made from userland else from kernel
 *
 * Returns:
 * On sucess: QLA_SUCCESS
 * On failure: QLA_ERROR
 *
 * This create separate sysfs entries for session and connection attributes of
 * the given fw ddb entry.
 * If this is invoked as a result of a userspace call then the entry is marked
 * as nonpersistent using flash_state field.
 **/
static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
					struct dev_db_entry *fw_ddb_entry,
					uint16_t *idx, int user)
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	struct iscsi_bus_flash_conn *fnode_conn = NULL;
	int rc = QLA_ERROR;

	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
						 &qla4xxx_iscsi_transport, 0);
	if (!fnode_sess) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
			   __func__, *idx, ha->host_no);
		goto exit_tgt_create;
	}

	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
						 &qla4xxx_iscsi_transport, 0);
	if (!fnode_conn) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
			   __func__, *idx, ha->host_no);
		goto free_sess;
	}

	if (user) {
		/* Userspace-created entries are not backed by flash yet. */
		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
	} else {
		fnode_sess->flash_state = DEV_DB_PERSISTENT;

		/* Flag the adapter's configured boot targets. */
		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
			fnode_sess->is_boot_target = 1;
		else
			fnode_sess->is_boot_target = 0;
	}

	/* NOTE(review): rc is assigned here but never checked - a copy
	 * failure still returns QLA_SUCCESS with a partially populated
	 * sysfs node; confirm whether that is intentional. */
	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
					   fw_ddb_entry);

	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
		   __func__, fnode_sess->dev.kobj.name);

	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
		   __func__, fnode_conn->dev.kobj.name);

	return QLA_SUCCESS;

free_sess:
	iscsi_destroy_flashnode_sess(fnode_sess);

exit_tgt_create:
	return QLA_ERROR;
}
6062
6063 /**
6064 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
6065 * @shost: pointer to host
6066 * @buf: type of ddb entry (ipv4/ipv6)
6067 * @len: length of buf
6068 *
6069 * This creates new ddb entry in the flash by finding first free index and
6070 * storing default ddb there. And then create sysfs entry for the new ddb entry.
6071 **/
6072 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
6073 int len)
6074 {
6075 struct scsi_qla_host *ha = to_qla_host(shost);
6076 struct dev_db_entry *fw_ddb_entry = NULL;
6077 dma_addr_t fw_ddb_entry_dma;
6078 struct device *dev;
6079 uint16_t idx = 0;
6080 uint16_t max_ddbs = 0;
6081 uint32_t options = 0;
6082 uint32_t rval = QLA_ERROR;
6083
6084 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
6085 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
6086 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
6087 __func__));
6088 goto exit_ddb_add;
6089 }
6090
6091 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
6092 MAX_DEV_DB_ENTRIES;
6093
6094 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6095 &fw_ddb_entry_dma, GFP_KERNEL);
6096 if (!fw_ddb_entry) {
6097 DEBUG2(ql4_printk(KERN_ERR, ha,
6098 "%s: Unable to allocate dma buffer\n",
6099 __func__));
6100 goto exit_ddb_add;
6101 }
6102
6103 dev = iscsi_find_flashnode_sess(ha->host, NULL,
6104 qla4xxx_sysfs_ddb_is_non_persistent);
6105 if (dev) {
6106 ql4_printk(KERN_ERR, ha,
6107 "%s: A non-persistent entry %s found\n",
6108 __func__, dev->kobj.name);
6109 put_device(dev);
6110 goto exit_ddb_add;
6111 }
6112
6113 /* Index 0 and 1 are reserved for boot target entries */
6114 for (idx = 2; idx < max_ddbs; idx++) {
6115 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
6116 fw_ddb_entry_dma, idx))
6117 break;
6118 }
6119
6120 if (idx == max_ddbs)
6121 goto exit_ddb_add;
6122
6123 if (!strncasecmp("ipv6", buf, 4))
6124 options |= IPV6_DEFAULT_DDB_ENTRY;
6125
6126 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6127 if (rval == QLA_ERROR)
6128 goto exit_ddb_add;
6129
6130 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
6131
6132 exit_ddb_add:
6133 if (fw_ddb_entry)
6134 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6135 fw_ddb_entry, fw_ddb_entry_dma);
6136 if (rval == QLA_SUCCESS)
6137 return idx;
6138 else
6139 return -EIO;
6140 }
6141
6142 /**
6143 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
6144 * @fnode_sess: pointer to session attrs of flash ddb entry
6145 * @fnode_conn: pointer to connection attrs of flash ddb entry
6146 *
6147 * This writes the contents of target ddb buffer to Flash with a valid cookie
6148 * value in order to make the ddb entry persistent.
6149 **/
6150 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
6151 struct iscsi_bus_flash_conn *fnode_conn)
6152 {
6153 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6154 struct scsi_qla_host *ha = to_qla_host(shost);
6155 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6156 struct dev_db_entry *fw_ddb_entry = NULL;
6157 dma_addr_t fw_ddb_entry_dma;
6158 uint32_t options = 0;
6159 int rval = 0;
6160
6161 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6162 &fw_ddb_entry_dma, GFP_KERNEL);
6163 if (!fw_ddb_entry) {
6164 DEBUG2(ql4_printk(KERN_ERR, ha,
6165 "%s: Unable to allocate dma buffer\n",
6166 __func__));
6167 rval = -ENOMEM;
6168 goto exit_ddb_apply;
6169 }
6170
6171 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6172 options |= IPV6_DEFAULT_DDB_ENTRY;
6173
6174 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6175 if (rval == QLA_ERROR)
6176 goto exit_ddb_apply;
6177
6178 dev_db_start_offset += (fnode_sess->target_id *
6179 sizeof(*fw_ddb_entry));
6180
6181 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6182 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6183
6184 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
6185 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
6186
6187 if (rval == QLA_SUCCESS) {
6188 fnode_sess->flash_state = DEV_DB_PERSISTENT;
6189 ql4_printk(KERN_INFO, ha,
6190 "%s: flash node %u of host %lu written to flash\n",
6191 __func__, fnode_sess->target_id, ha->host_no);
6192 } else {
6193 rval = -EIO;
6194 ql4_printk(KERN_ERR, ha,
6195 "%s: Error while writing flash node %u of host %lu to flash\n",
6196 __func__, fnode_sess->target_id, ha->host_no);
6197 }
6198
6199 exit_ddb_apply:
6200 if (fw_ddb_entry)
6201 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6202 fw_ddb_entry, fw_ddb_entry_dma);
6203 return rval;
6204 }
6205
/**
 * qla4xxx_sysfs_ddb_conn_open - program a ddb and open its connection
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: ddb contents to program into the firmware
 * @idx: firmware ddb index to use
 *
 * Copies @fw_ddb_entry into a DMA buffer, sets it at @idx via mailbox,
 * opens the connection and then polls the ddb state until the firmware
 * reports the connection settled (or a timeout elapses).
 *
 * Returns the status of the last firmware query (QLA_SUCCESS/QLA_ERROR).
 **/
static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
					   struct dev_db_entry *fw_ddb_entry,
					   uint16_t idx)
{
	struct dev_db_entry *ddb_entry = NULL;
	dma_addr_t ddb_entry_dma;
	unsigned long wtime;
	uint32_t mbx_sts = 0;
	uint32_t state = 0, conn_err = 0;
	uint16_t tmo = 0;
	int ret = 0;

	/* Firmware needs the ddb contents in a DMA-able buffer. */
	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
				       &ddb_entry_dma, GFP_KERNEL);
	if (!ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer\n",
				  __func__));
		return QLA_ERROR;
	}

	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));

	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
	if (ret != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to set ddb entry for index %d\n",
				  __func__, idx));
		goto exit_ddb_conn_open;
	}

	qla4xxx_conn_open(ha, idx);

	/* To ensure that sendtargets is done, wait for at least 12 secs */
	/* Clamp the wait to [LOGIN_TOV, LOGIN_TOV * 10) seconds. */
	tmo = ((ha->def_timeout > LOGIN_TOV) &&
	       (ha->def_timeout < LOGIN_TOV * 10) ?
	       ha->def_timeout : LOGIN_TOV);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Default time to wait for login to ddb %d\n", tmo));

	/* Poll the ddb state until it settles or the deadline passes. */
	wtime = jiffies + (HZ * tmo);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
					      NULL, &state, &conn_err, NULL,
					      NULL);
		/*
		 * NOTE(review): on a query failure this retries immediately
		 * without the 100ms sleep below, busy-spinning until the
		 * deadline — presumably intentional; confirm before changing.
		 */
		if (ret == QLA_ERROR)
			continue;

		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
		    state == DDB_DS_SESSION_FAILED)
			break;

		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(wtime, jiffies));

exit_ddb_conn_open:
	if (ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
				  ddb_entry, ddb_entry_dma);
	return ret;
}
6268
/**
 * qla4xxx_ddb_login_st - perform a SendTargets discovery login
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: ddb contents of the SendTargets entry
 * @target_id: flash target index the discovery originates from
 *
 * Opens a temporary connection to the SendTargets portal, lets the
 * firmware populate new normal-target ddbs, then clears every ddb the
 * discovery created (including the temporary one). Only one discovery
 * may run at a time, guarded by AF_ST_DISCOVERY_IN_PROGRESS.
 *
 * Returns QLA_SUCCESS/QLA_ERROR.
 **/
static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
				struct dev_db_entry *fw_ddb_entry,
				uint16_t target_id)
{
	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
	struct list_head list_nt;
	uint16_t ddb_index;
	int ret = 0;

	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha,
			   "%s: A discovery already in progress!\n", __func__);
		return QLA_ERROR;
	}

	INIT_LIST_HEAD(&list_nt);

	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);

	/* Reserve a free firmware ddb index for the temporary connection. */
	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
	if (ret == QLA_ERROR)
		goto exit_login_st_clr_bit;

	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
	if (ret == QLA_ERROR)
		goto exit_login_st;

	/* Collect the normal-target ddbs the discovery produced ... */
	qla4xxx_build_new_nt_list(ha, &list_nt, target_id);

	/* ... and clear them again; discovery results are not kept here. */
	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
		list_del_init(&ddb_idx->list);
		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
		vfree(ddb_idx);
	}

exit_login_st:
	/* Always tear down the temporary SendTargets ddb. */
	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to clear DDB index = 0x%x\n", ddb_index);
	}

	clear_bit(ddb_index, ha->ddb_idx_map);

exit_login_st_clr_bit:
	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
	return ret;
}
6316
6317 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
6318 struct dev_db_entry *fw_ddb_entry,
6319 uint16_t idx)
6320 {
6321 int ret = QLA_ERROR;
6322
6323 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
6324 if (ret != QLA_SUCCESS)
6325 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
6326 idx);
6327 else
6328 ret = -EPERM;
6329
6330 return ret;
6331 }
6332
6333 /**
6334 * qla4xxx_sysfs_ddb_login - Login to the specified target
6335 * @fnode_sess: pointer to session attrs of flash ddb entry
6336 * @fnode_conn: pointer to connection attrs of flash ddb entry
6337 *
6338 * This logs in to the specified target
6339 **/
6340 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
6341 struct iscsi_bus_flash_conn *fnode_conn)
6342 {
6343 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6344 struct scsi_qla_host *ha = to_qla_host(shost);
6345 struct dev_db_entry *fw_ddb_entry = NULL;
6346 dma_addr_t fw_ddb_entry_dma;
6347 uint32_t options = 0;
6348 int ret = 0;
6349
6350 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
6351 ql4_printk(KERN_ERR, ha,
6352 "%s: Target info is not persistent\n", __func__);
6353 ret = -EIO;
6354 goto exit_ddb_login;
6355 }
6356
6357 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6358 &fw_ddb_entry_dma, GFP_KERNEL);
6359 if (!fw_ddb_entry) {
6360 DEBUG2(ql4_printk(KERN_ERR, ha,
6361 "%s: Unable to allocate dma buffer\n",
6362 __func__));
6363 ret = -ENOMEM;
6364 goto exit_ddb_login;
6365 }
6366
6367 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6368 options |= IPV6_DEFAULT_DDB_ENTRY;
6369
6370 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6371 if (ret == QLA_ERROR)
6372 goto exit_ddb_login;
6373
6374 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6375 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6376
6377 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
6378 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
6379 fnode_sess->target_id);
6380 else
6381 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
6382 fnode_sess->target_id);
6383
6384 if (ret > 0)
6385 ret = -EIO;
6386
6387 exit_ddb_login:
6388 if (fw_ddb_entry)
6389 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6390 fw_ddb_entry, fw_ddb_entry_dma);
6391 return ret;
6392 }
6393
6394 /**
6395 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
6396 * @cls_sess: pointer to session to be logged out
6397 *
6398 * This performs session log out from the specified target
6399 **/
static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry = NULL;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	unsigned long flags;
	unsigned long wtime;
	uint32_t ddb_state;
	int options;
	int ret = 0;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	/* Only sessions backed by a flash ddb may be logged out here. */
	if (ddb_entry->ddb_type != FLASH_DDB) {
		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
			   __func__);
		ret = -ENXIO;
		goto exit_ddb_logout;
	}

	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Logout from boot target entry is not permitted.\n",
			   __func__);
		ret = -EPERM;
		goto exit_ddb_logout;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_ddb_logout;
	}

	/* If relogin was already disabled, go straight to the logout. */
	if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
		goto ddb_logout_init;

	ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
				      fw_ddb_entry, fw_ddb_entry_dma,
				      NULL, NULL, &ddb_state, NULL,
				      NULL, NULL);
	if (ret == QLA_ERROR)
		goto ddb_logout_init;

	/* Session already active: no need to wait for a relogin window. */
	if (ddb_state == DDB_DS_SESSION_ACTIVE)
		goto ddb_logout_init;

	/* wait until next relogin is triggered using DF_RELOGIN and
	 * clear DF_RELOGIN to avoid invocation of further relogin
	 */
	wtime = jiffies + (HZ * RELOGIN_TOV);
	do {
		if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
			goto ddb_logout_init;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

ddb_logout_init:
	/* Stop the relogin timers before issuing the logout. */
	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
	atomic_set(&ddb_entry->relogin_timer, 0);

	options = LOGOUT_OPTION_CLOSE_SESSION;
	qla4xxx_session_logout_ddb(ha, ddb_entry, options);

	/* Poll the ddb state until the firmware reports the session down
	 * (or LOGOUT_TOV expires). */
	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto ddb_logout_clr_sess;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto ddb_logout_clr_sess;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

ddb_logout_clr_sess:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
	/*
	 * we have decremented the reference count of the driver
	 * when we setup the session to have the driver unload
	 * to be seamless without actually destroying the
	 * session
	 **/
	try_module_get(qla4xxx_iscsi_transport.owner);
	iscsi_destroy_endpoint(ddb_entry->conn->ep);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(ddb_entry->sess);

	clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
	ret = QLA_SUCCESS;

exit_ddb_logout:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}
6516
6517 /**
6518 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
6519 * @fnode_sess: pointer to session attrs of flash ddb entry
6520 * @fnode_conn: pointer to connection attrs of flash ddb entry
6521 *
6522 * This performs log out from the specified target
6523 **/
6524 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
6525 struct iscsi_bus_flash_conn *fnode_conn)
6526 {
6527 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6528 struct scsi_qla_host *ha = to_qla_host(shost);
6529 struct ql4_tuple_ddb *flash_tddb = NULL;
6530 struct ql4_tuple_ddb *tmp_tddb = NULL;
6531 struct dev_db_entry *fw_ddb_entry = NULL;
6532 struct ddb_entry *ddb_entry = NULL;
6533 dma_addr_t fw_ddb_dma;
6534 uint32_t next_idx = 0;
6535 uint32_t state = 0, conn_err = 0;
6536 uint16_t conn_id = 0;
6537 int idx, index;
6538 int status, ret = 0;
6539
6540 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6541 &fw_ddb_dma);
6542 if (fw_ddb_entry == NULL) {
6543 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
6544 ret = -ENOMEM;
6545 goto exit_ddb_logout;
6546 }
6547
6548 flash_tddb = vzalloc(sizeof(*flash_tddb));
6549 if (!flash_tddb) {
6550 ql4_printk(KERN_WARNING, ha,
6551 "%s:Memory Allocation failed.\n", __func__);
6552 ret = -ENOMEM;
6553 goto exit_ddb_logout;
6554 }
6555
6556 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6557 if (!tmp_tddb) {
6558 ql4_printk(KERN_WARNING, ha,
6559 "%s:Memory Allocation failed.\n", __func__);
6560 ret = -ENOMEM;
6561 goto exit_ddb_logout;
6562 }
6563
6564 if (!fnode_sess->targetname) {
6565 ql4_printk(KERN_ERR, ha,
6566 "%s:Cannot logout from SendTarget entry\n",
6567 __func__);
6568 ret = -EPERM;
6569 goto exit_ddb_logout;
6570 }
6571
6572 if (fnode_sess->is_boot_target) {
6573 ql4_printk(KERN_ERR, ha,
6574 "%s: Logout from boot target entry is not permitted.\n",
6575 __func__);
6576 ret = -EPERM;
6577 goto exit_ddb_logout;
6578 }
6579
6580 strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
6581 ISCSI_NAME_SIZE);
6582
6583 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6584 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
6585 else
6586 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
6587
6588 flash_tddb->tpgt = fnode_sess->tpgt;
6589 flash_tddb->port = fnode_conn->port;
6590
6591 COPY_ISID(flash_tddb->isid, fnode_sess->isid);
6592
6593 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6594 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6595 if (ddb_entry == NULL)
6596 continue;
6597
6598 if (ddb_entry->ddb_type != FLASH_DDB)
6599 continue;
6600
6601 index = ddb_entry->sess->target_id;
6602 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
6603 fw_ddb_dma, NULL, &next_idx,
6604 &state, &conn_err, NULL,
6605 &conn_id);
6606 if (status == QLA_ERROR) {
6607 ret = -ENOMEM;
6608 break;
6609 }
6610
6611 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
6612
6613 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
6614 true);
6615 if (status == QLA_SUCCESS) {
6616 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
6617 break;
6618 }
6619 }
6620
6621 if (idx == MAX_DDB_ENTRIES)
6622 ret = -ESRCH;
6623
6624 exit_ddb_logout:
6625 if (flash_tddb)
6626 vfree(flash_tddb);
6627 if (tmp_tddb)
6628 vfree(tmp_tddb);
6629 if (fw_ddb_entry)
6630 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6631
6632 return ret;
6633 }
6634
6635 static int
6636 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6637 int param, char *buf)
6638 {
6639 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6640 struct scsi_qla_host *ha = to_qla_host(shost);
6641 struct iscsi_bus_flash_conn *fnode_conn;
6642 struct ql4_chap_table chap_tbl;
6643 struct device *dev;
6644 int parent_type;
6645 int rc = 0;
6646
6647 dev = iscsi_find_flashnode_conn(fnode_sess);
6648 if (!dev)
6649 return -EIO;
6650
6651 fnode_conn = iscsi_dev_to_flash_conn(dev);
6652
6653 switch (param) {
6654 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6655 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
6656 break;
6657 case ISCSI_FLASHNODE_PORTAL_TYPE:
6658 rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
6659 break;
6660 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6661 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
6662 break;
6663 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6664 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
6665 break;
6666 case ISCSI_FLASHNODE_ENTRY_EN:
6667 rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
6668 break;
6669 case ISCSI_FLASHNODE_HDR_DGST_EN:
6670 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
6671 break;
6672 case ISCSI_FLASHNODE_DATA_DGST_EN:
6673 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
6674 break;
6675 case ISCSI_FLASHNODE_IMM_DATA_EN:
6676 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
6677 break;
6678 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6679 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
6680 break;
6681 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6682 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
6683 break;
6684 case ISCSI_FLASHNODE_PDU_INORDER:
6685 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
6686 break;
6687 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6688 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
6689 break;
6690 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6691 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
6692 break;
6693 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6694 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
6695 break;
6696 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6697 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
6698 break;
6699 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6700 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
6701 break;
6702 case ISCSI_FLASHNODE_ERL:
6703 rc = sprintf(buf, "%u\n", fnode_sess->erl);
6704 break;
6705 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6706 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
6707 break;
6708 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6709 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
6710 break;
6711 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6712 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
6713 break;
6714 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6715 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
6716 break;
6717 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6718 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
6719 break;
6720 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6721 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
6722 break;
6723 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6724 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
6725 break;
6726 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6727 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
6728 break;
6729 case ISCSI_FLASHNODE_FIRST_BURST:
6730 rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
6731 break;
6732 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6733 rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
6734 break;
6735 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6736 rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
6737 break;
6738 case ISCSI_FLASHNODE_MAX_R2T:
6739 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
6740 break;
6741 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6742 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
6743 break;
6744 case ISCSI_FLASHNODE_ISID:
6745 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
6746 fnode_sess->isid[0], fnode_sess->isid[1],
6747 fnode_sess->isid[2], fnode_sess->isid[3],
6748 fnode_sess->isid[4], fnode_sess->isid[5]);
6749 break;
6750 case ISCSI_FLASHNODE_TSID:
6751 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
6752 break;
6753 case ISCSI_FLASHNODE_PORT:
6754 rc = sprintf(buf, "%d\n", fnode_conn->port);
6755 break;
6756 case ISCSI_FLASHNODE_MAX_BURST:
6757 rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
6758 break;
6759 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6760 rc = sprintf(buf, "%u\n",
6761 fnode_sess->default_taskmgmt_timeout);
6762 break;
6763 case ISCSI_FLASHNODE_IPADDR:
6764 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6765 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
6766 else
6767 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
6768 break;
6769 case ISCSI_FLASHNODE_ALIAS:
6770 if (fnode_sess->targetalias)
6771 rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
6772 else
6773 rc = sprintf(buf, "\n");
6774 break;
6775 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6776 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6777 rc = sprintf(buf, "%pI6\n",
6778 fnode_conn->redirect_ipaddr);
6779 else
6780 rc = sprintf(buf, "%pI4\n",
6781 fnode_conn->redirect_ipaddr);
6782 break;
6783 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6784 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
6785 break;
6786 case ISCSI_FLASHNODE_LOCAL_PORT:
6787 rc = sprintf(buf, "%u\n", fnode_conn->local_port);
6788 break;
6789 case ISCSI_FLASHNODE_IPV4_TOS:
6790 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
6791 break;
6792 case ISCSI_FLASHNODE_IPV6_TC:
6793 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6794 rc = sprintf(buf, "%u\n",
6795 fnode_conn->ipv6_traffic_class);
6796 else
6797 rc = sprintf(buf, "\n");
6798 break;
6799 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6800 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
6801 break;
6802 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6803 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6804 rc = sprintf(buf, "%pI6\n",
6805 fnode_conn->link_local_ipv6_addr);
6806 else
6807 rc = sprintf(buf, "\n");
6808 break;
6809 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6810 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
6811 break;
6812 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6813 if (fnode_sess->discovery_parent_type == DDB_ISNS)
6814 parent_type = ISCSI_DISC_PARENT_ISNS;
6815 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6816 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6817 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6818 parent_type = ISCSI_DISC_PARENT_SENDTGT;
6819 else
6820 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6821
6822 rc = sprintf(buf, "%s\n",
6823 iscsi_get_discovery_parent_name(parent_type));
6824 break;
6825 case ISCSI_FLASHNODE_NAME:
6826 if (fnode_sess->targetname)
6827 rc = sprintf(buf, "%s\n", fnode_sess->targetname);
6828 else
6829 rc = sprintf(buf, "\n");
6830 break;
6831 case ISCSI_FLASHNODE_TPGT:
6832 rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
6833 break;
6834 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6835 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
6836 break;
6837 case ISCSI_FLASHNODE_TCP_RECV_WSF:
6838 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
6839 break;
6840 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6841 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
6842 break;
6843 case ISCSI_FLASHNODE_USERNAME:
6844 if (fnode_sess->chap_auth_en) {
6845 qla4xxx_get_uni_chap_at_index(ha,
6846 chap_tbl.name,
6847 chap_tbl.secret,
6848 fnode_sess->chap_out_idx);
6849 rc = sprintf(buf, "%s\n", chap_tbl.name);
6850 } else {
6851 rc = sprintf(buf, "\n");
6852 }
6853 break;
6854 case ISCSI_FLASHNODE_PASSWORD:
6855 if (fnode_sess->chap_auth_en) {
6856 qla4xxx_get_uni_chap_at_index(ha,
6857 chap_tbl.name,
6858 chap_tbl.secret,
6859 fnode_sess->chap_out_idx);
6860 rc = sprintf(buf, "%s\n", chap_tbl.secret);
6861 } else {
6862 rc = sprintf(buf, "\n");
6863 }
6864 break;
6865 case ISCSI_FLASHNODE_STATSN:
6866 rc = sprintf(buf, "%u\n", fnode_conn->statsn);
6867 break;
6868 case ISCSI_FLASHNODE_EXP_STATSN:
6869 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
6870 break;
6871 case ISCSI_FLASHNODE_IS_BOOT_TGT:
6872 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
6873 break;
6874 default:
6875 rc = -ENOSYS;
6876 break;
6877 }
6878
6879 put_device(dev);
6880 return rc;
6881 }
6882
6883 /**
6884 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6885 * @fnode_sess: pointer to session attrs of flash ddb entry
6886 * @fnode_conn: pointer to connection attrs of flash ddb entry
6887 * @data: Parameters and their values to update
6888 * @len: len of data
6889 *
6890 * This sets the parameter of flash ddb entry and writes them to flash
6891 **/
6892 static int
6893 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6894 struct iscsi_bus_flash_conn *fnode_conn,
6895 void *data, int len)
6896 {
6897 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6898 struct scsi_qla_host *ha = to_qla_host(shost);
6899 struct iscsi_flashnode_param_info *fnode_param;
6900 struct ql4_chap_table chap_tbl;
6901 struct nlattr *attr;
6902 uint16_t chap_out_idx = INVALID_ENTRY;
6903 int rc = QLA_ERROR;
6904 uint32_t rem = len;
6905
6906 memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
6907 nla_for_each_attr(attr, data, len, rem) {
6908 fnode_param = nla_data(attr);
6909
6910 switch (fnode_param->param) {
6911 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6912 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6913 break;
6914 case ISCSI_FLASHNODE_PORTAL_TYPE:
6915 memcpy(fnode_sess->portal_type, fnode_param->value,
6916 strlen(fnode_sess->portal_type));
6917 break;
6918 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6919 fnode_sess->auto_snd_tgt_disable =
6920 fnode_param->value[0];
6921 break;
6922 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6923 fnode_sess->discovery_sess = fnode_param->value[0];
6924 break;
6925 case ISCSI_FLASHNODE_ENTRY_EN:
6926 fnode_sess->entry_state = fnode_param->value[0];
6927 break;
6928 case ISCSI_FLASHNODE_HDR_DGST_EN:
6929 fnode_conn->hdrdgst_en = fnode_param->value[0];
6930 break;
6931 case ISCSI_FLASHNODE_DATA_DGST_EN:
6932 fnode_conn->datadgst_en = fnode_param->value[0];
6933 break;
6934 case ISCSI_FLASHNODE_IMM_DATA_EN:
6935 fnode_sess->imm_data_en = fnode_param->value[0];
6936 break;
6937 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6938 fnode_sess->initial_r2t_en = fnode_param->value[0];
6939 break;
6940 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6941 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6942 break;
6943 case ISCSI_FLASHNODE_PDU_INORDER:
6944 fnode_sess->pdu_inorder_en = fnode_param->value[0];
6945 break;
6946 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6947 fnode_sess->chap_auth_en = fnode_param->value[0];
6948 /* Invalidate chap index if chap auth is disabled */
6949 if (!fnode_sess->chap_auth_en)
6950 fnode_sess->chap_out_idx = INVALID_ENTRY;
6951
6952 break;
6953 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6954 fnode_conn->snack_req_en = fnode_param->value[0];
6955 break;
6956 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6957 fnode_sess->discovery_logout_en = fnode_param->value[0];
6958 break;
6959 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6960 fnode_sess->bidi_chap_en = fnode_param->value[0];
6961 break;
6962 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6963 fnode_sess->discovery_auth_optional =
6964 fnode_param->value[0];
6965 break;
6966 case ISCSI_FLASHNODE_ERL:
6967 fnode_sess->erl = fnode_param->value[0];
6968 break;
6969 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6970 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
6971 break;
6972 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6973 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
6974 break;
6975 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6976 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
6977 break;
6978 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6979 fnode_conn->tcp_timer_scale = fnode_param->value[0];
6980 break;
6981 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6982 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
6983 break;
6984 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6985 fnode_conn->fragment_disable = fnode_param->value[0];
6986 break;
6987 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6988 fnode_conn->max_recv_dlength =
6989 *(unsigned *)fnode_param->value;
6990 break;
6991 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6992 fnode_conn->max_xmit_dlength =
6993 *(unsigned *)fnode_param->value;
6994 break;
6995 case ISCSI_FLASHNODE_FIRST_BURST:
6996 fnode_sess->first_burst =
6997 *(unsigned *)fnode_param->value;
6998 break;
6999 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
7000 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
7001 break;
7002 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
7003 fnode_sess->time2retain =
7004 *(uint16_t *)fnode_param->value;
7005 break;
7006 case ISCSI_FLASHNODE_MAX_R2T:
7007 fnode_sess->max_r2t =
7008 *(uint16_t *)fnode_param->value;
7009 break;
7010 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
7011 fnode_conn->keepalive_timeout =
7012 *(uint16_t *)fnode_param->value;
7013 break;
7014 case ISCSI_FLASHNODE_ISID:
7015 memcpy(fnode_sess->isid, fnode_param->value,
7016 sizeof(fnode_sess->isid));
7017 break;
7018 case ISCSI_FLASHNODE_TSID:
7019 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
7020 break;
7021 case ISCSI_FLASHNODE_PORT:
7022 fnode_conn->port = *(uint16_t *)fnode_param->value;
7023 break;
7024 case ISCSI_FLASHNODE_MAX_BURST:
7025 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
7026 break;
7027 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
7028 fnode_sess->default_taskmgmt_timeout =
7029 *(uint16_t *)fnode_param->value;
7030 break;
7031 case ISCSI_FLASHNODE_IPADDR:
7032 memcpy(fnode_conn->ipaddress, fnode_param->value,
7033 IPv6_ADDR_LEN);
7034 break;
7035 case ISCSI_FLASHNODE_ALIAS:
7036 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
7037 (char *)fnode_param->value);
7038 break;
7039 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
7040 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
7041 IPv6_ADDR_LEN);
7042 break;
7043 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
7044 fnode_conn->max_segment_size =
7045 *(unsigned *)fnode_param->value;
7046 break;
7047 case ISCSI_FLASHNODE_LOCAL_PORT:
7048 fnode_conn->local_port =
7049 *(uint16_t *)fnode_param->value;
7050 break;
7051 case ISCSI_FLASHNODE_IPV4_TOS:
7052 fnode_conn->ipv4_tos = fnode_param->value[0];
7053 break;
7054 case ISCSI_FLASHNODE_IPV6_TC:
7055 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
7056 break;
7057 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
7058 fnode_conn->ipv6_flow_label = fnode_param->value[0];
7059 break;
7060 case ISCSI_FLASHNODE_NAME:
7061 rc = iscsi_switch_str_param(&fnode_sess->targetname,
7062 (char *)fnode_param->value);
7063 break;
7064 case ISCSI_FLASHNODE_TPGT:
7065 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
7066 break;
7067 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
7068 memcpy(fnode_conn->link_local_ipv6_addr,
7069 fnode_param->value, IPv6_ADDR_LEN);
7070 break;
7071 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
7072 fnode_sess->discovery_parent_idx =
7073 *(uint16_t *)fnode_param->value;
7074 break;
7075 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
7076 fnode_conn->tcp_xmit_wsf =
7077 *(uint8_t *)fnode_param->value;
7078 break;
7079 case ISCSI_FLASHNODE_TCP_RECV_WSF:
7080 fnode_conn->tcp_recv_wsf =
7081 *(uint8_t *)fnode_param->value;
7082 break;
7083 case ISCSI_FLASHNODE_STATSN:
7084 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
7085 break;
7086 case ISCSI_FLASHNODE_EXP_STATSN:
7087 fnode_conn->exp_statsn =
7088 *(uint32_t *)fnode_param->value;
7089 break;
7090 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
7091 chap_out_idx = *(uint16_t *)fnode_param->value;
7092 if (!qla4xxx_get_uni_chap_at_index(ha,
7093 chap_tbl.name,
7094 chap_tbl.secret,
7095 chap_out_idx)) {
7096 fnode_sess->chap_out_idx = chap_out_idx;
7097 /* Enable chap auth if chap index is valid */
7098 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
7099 }
7100 break;
7101 default:
7102 ql4_printk(KERN_ERR, ha,
7103 "%s: No such sysfs attribute\n", __func__);
7104 rc = -ENOSYS;
7105 goto exit_set_param;
7106 }
7107 }
7108
7109 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
7110
7111 exit_set_param:
7112 return rc;
7113 }
7114
7115 /**
7116 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
7117 * @fnode_sess: pointer to session attrs of flash ddb entry
7118 *
7119 * This invalidates the flash ddb entry at the given index
7120 **/
7121 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
7122 {
7123 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7124 struct scsi_qla_host *ha = to_qla_host(shost);
7125 uint32_t dev_db_start_offset;
7126 uint32_t dev_db_end_offset;
7127 struct dev_db_entry *fw_ddb_entry = NULL;
7128 dma_addr_t fw_ddb_entry_dma;
7129 uint16_t *ddb_cookie = NULL;
7130 size_t ddb_size = 0;
7131 void *pddb = NULL;
7132 int target_id;
7133 int rc = 0;
7134
7135 if (fnode_sess->is_boot_target) {
7136 rc = -EPERM;
7137 DEBUG2(ql4_printk(KERN_ERR, ha,
7138 "%s: Deletion of boot target entry is not permitted.\n",
7139 __func__));
7140 goto exit_ddb_del;
7141 }
7142
7143 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
7144 goto sysfs_ddb_del;
7145
7146 if (is_qla40XX(ha)) {
7147 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
7148 dev_db_end_offset = FLASH_OFFSET_DB_END;
7149 dev_db_start_offset += (fnode_sess->target_id *
7150 sizeof(*fw_ddb_entry));
7151 ddb_size = sizeof(*fw_ddb_entry);
7152 } else {
7153 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
7154 (ha->hw.flt_region_ddb << 2);
7155 /* flt_ddb_size is DDB table size for both ports
7156 * so divide it by 2 to calculate the offset for second port
7157 */
7158 if (ha->port_num == 1)
7159 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
7160
7161 dev_db_end_offset = dev_db_start_offset +
7162 (ha->hw.flt_ddb_size / 2);
7163
7164 dev_db_start_offset += (fnode_sess->target_id *
7165 sizeof(*fw_ddb_entry));
7166 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
7167
7168 ddb_size = sizeof(*ddb_cookie);
7169 }
7170
7171 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
7172 __func__, dev_db_start_offset, dev_db_end_offset));
7173
7174 if (dev_db_start_offset > dev_db_end_offset) {
7175 rc = -EIO;
7176 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
7177 __func__, fnode_sess->target_id));
7178 goto exit_ddb_del;
7179 }
7180
7181 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
7182 &fw_ddb_entry_dma, GFP_KERNEL);
7183 if (!pddb) {
7184 rc = -ENOMEM;
7185 DEBUG2(ql4_printk(KERN_ERR, ha,
7186 "%s: Unable to allocate dma buffer\n",
7187 __func__));
7188 goto exit_ddb_del;
7189 }
7190
7191 if (is_qla40XX(ha)) {
7192 fw_ddb_entry = pddb;
7193 memset(fw_ddb_entry, 0, ddb_size);
7194 ddb_cookie = &fw_ddb_entry->cookie;
7195 } else {
7196 ddb_cookie = pddb;
7197 }
7198
7199 /* invalidate the cookie */
7200 *ddb_cookie = 0xFFEE;
7201 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7202 ddb_size, FLASH_OPT_RMW_COMMIT);
7203
7204 sysfs_ddb_del:
7205 target_id = fnode_sess->target_id;
7206 iscsi_destroy_flashnode_sess(fnode_sess);
7207 ql4_printk(KERN_INFO, ha,
7208 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
7209 __func__, target_id, ha->host_no);
7210 exit_ddb_del:
7211 if (pddb)
7212 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
7213 fw_ddb_entry_dma);
7214 return rc;
7215 }
7216
7217 /**
7218 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
7219 * @ha: pointer to adapter structure
7220 *
7221 * Export the firmware DDB for all send targets and normal targets to sysfs.
7222 **/
7223 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
7224 {
7225 struct dev_db_entry *fw_ddb_entry = NULL;
7226 dma_addr_t fw_ddb_entry_dma;
7227 uint16_t max_ddbs;
7228 uint16_t idx = 0;
7229 int ret = QLA_SUCCESS;
7230
7231 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
7232 sizeof(*fw_ddb_entry),
7233 &fw_ddb_entry_dma, GFP_KERNEL);
7234 if (!fw_ddb_entry) {
7235 DEBUG2(ql4_printk(KERN_ERR, ha,
7236 "%s: Unable to allocate dma buffer\n",
7237 __func__));
7238 return -ENOMEM;
7239 }
7240
7241 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7242 MAX_DEV_DB_ENTRIES;
7243
7244 for (idx = 0; idx < max_ddbs; idx++) {
7245 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
7246 idx))
7247 continue;
7248
7249 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
7250 if (ret) {
7251 ret = -EIO;
7252 break;
7253 }
7254 }
7255
7256 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
7257 fw_ddb_entry_dma);
7258
7259 return ret;
7260 }
7261
/**
 * qla4xxx_sysfs_ddb_remove - Remove flashnode sysfs entries for this host
 * @ha: pointer to adapter structure
 *
 * Counterpart of qla4xxx_sysfs_ddb_export(): destroys every flashnode
 * object registered against this host.
 **/
static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
{
	iscsi_destroy_all_flashnode(ha->host);
}
7266
7267 /**
7268 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
7269 * @ha: pointer to adapter structure
7270 * @is_reset: Is this init path or reset path
7271 *
7272 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
7273 * using connection open, then create the list of normal targets (nt)
7274 * from firmware DDBs. Based on the list of nt setup session and connection
7275 * objects.
7276 **/
7277 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
7278 {
7279 uint16_t tmo = 0;
7280 struct list_head list_st, list_nt;
7281 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
7282 unsigned long wtime;
7283
7284 if (!test_bit(AF_LINK_UP, &ha->flags)) {
7285 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
7286 ha->is_reset = is_reset;
7287 return;
7288 }
7289
7290 INIT_LIST_HEAD(&list_st);
7291 INIT_LIST_HEAD(&list_nt);
7292
7293 qla4xxx_build_st_list(ha, &list_st);
7294
7295 /* Before issuing conn open mbox, ensure all IPs states are configured
7296 * Note, conn open fails if IPs are not configured
7297 */
7298 qla4xxx_wait_for_ip_configuration(ha);
7299
7300 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
7301 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
7302 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
7303 }
7304
7305 /* Wait to ensure all sendtargets are done for min 12 sec wait */
7306 tmo = ((ha->def_timeout > LOGIN_TOV) &&
7307 (ha->def_timeout < LOGIN_TOV * 10) ?
7308 ha->def_timeout : LOGIN_TOV);
7309
7310 DEBUG2(ql4_printk(KERN_INFO, ha,
7311 "Default time to wait for build ddb %d\n", tmo));
7312
7313 wtime = jiffies + (HZ * tmo);
7314 do {
7315 if (list_empty(&list_st))
7316 break;
7317
7318 qla4xxx_remove_failed_ddb(ha, &list_st);
7319 schedule_timeout_uninterruptible(HZ / 10);
7320 } while (time_after(wtime, jiffies));
7321
7322
7323 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
7324
7325 qla4xxx_free_ddb_list(&list_st);
7326 qla4xxx_free_ddb_list(&list_nt);
7327
7328 qla4xxx_free_ddb_index(ha);
7329 }
7330
7331 /**
7332 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
7333 * response.
7334 * @ha: pointer to adapter structure
7335 *
7336 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
7337 * set in DDB and we will wait for login response of boot targets during
7338 * probe.
7339 **/
7340 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
7341 {
7342 struct ddb_entry *ddb_entry;
7343 struct dev_db_entry *fw_ddb_entry = NULL;
7344 dma_addr_t fw_ddb_entry_dma;
7345 unsigned long wtime;
7346 uint32_t ddb_state;
7347 int max_ddbs, idx, ret;
7348
7349 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7350 MAX_DEV_DB_ENTRIES;
7351
7352 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7353 &fw_ddb_entry_dma, GFP_KERNEL);
7354 if (!fw_ddb_entry) {
7355 ql4_printk(KERN_ERR, ha,
7356 "%s: Unable to allocate dma buffer\n", __func__);
7357 goto exit_login_resp;
7358 }
7359
7360 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
7361
7362 for (idx = 0; idx < max_ddbs; idx++) {
7363 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7364 if (ddb_entry == NULL)
7365 continue;
7366
7367 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7368 DEBUG2(ql4_printk(KERN_INFO, ha,
7369 "%s: DDB index [%d]\n", __func__,
7370 ddb_entry->fw_ddb_index));
7371 do {
7372 ret = qla4xxx_get_fwddb_entry(ha,
7373 ddb_entry->fw_ddb_index,
7374 fw_ddb_entry, fw_ddb_entry_dma,
7375 NULL, NULL, &ddb_state, NULL,
7376 NULL, NULL);
7377 if (ret == QLA_ERROR)
7378 goto exit_login_resp;
7379
7380 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
7381 (ddb_state == DDB_DS_SESSION_FAILED))
7382 break;
7383
7384 schedule_timeout_uninterruptible(HZ);
7385
7386 } while ((time_after(wtime, jiffies)));
7387
7388 if (!time_after(wtime, jiffies)) {
7389 DEBUG2(ql4_printk(KERN_INFO, ha,
7390 "%s: Login response wait timer expired\n",
7391 __func__));
7392 goto exit_login_resp;
7393 }
7394 }
7395 }
7396
7397 exit_login_resp:
7398 if (fw_ddb_entry)
7399 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7400 fw_ddb_entry, fw_ddb_entry_dma);
7401 }
7402
7403 /**
7404 * qla4xxx_probe_adapter - callback function to probe HBA
7405 * @pdev: pointer to pci_dev structure
7406 * @pci_device_id: pointer to pci_device entry
7407 *
7408 * This routine will probe for Qlogic 4xxx iSCSI host adapters.
7409 * It returns zero if successful. It also initializes all data necessary for
7410 * the driver.
7411 **/
7412 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
7413 const struct pci_device_id *ent)
7414 {
7415 int ret = -ENODEV, status;
7416 struct Scsi_Host *host;
7417 struct scsi_qla_host *ha;
7418 uint8_t init_retry_count = 0;
7419 char buf[34];
7420 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
7421 uint32_t dev_state;
7422
7423 if (pci_enable_device(pdev))
7424 return -1;
7425
7426 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
7427 if (host == NULL) {
7428 printk(KERN_WARNING
7429 "qla4xxx: Couldn't allocate host from scsi layer!\n");
7430 goto probe_disable_device;
7431 }
7432
7433 /* Clear our data area */
7434 ha = to_qla_host(host);
7435 memset(ha, 0, sizeof(*ha));
7436
7437 /* Save the information from PCI BIOS. */
7438 ha->pdev = pdev;
7439 ha->host = host;
7440 ha->host_no = host->host_no;
7441 ha->func_num = PCI_FUNC(ha->pdev->devfn);
7442
7443 pci_enable_pcie_error_reporting(pdev);
7444
7445 /* Setup Runtime configurable options */
7446 if (is_qla8022(ha)) {
7447 ha->isp_ops = &qla4_82xx_isp_ops;
7448 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
7449 ha->qdr_sn_window = -1;
7450 ha->ddr_mn_window = -1;
7451 ha->curr_window = 255;
7452 nx_legacy_intr = &legacy_intr[ha->func_num];
7453 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
7454 ha->nx_legacy_intr.tgt_status_reg =
7455 nx_legacy_intr->tgt_status_reg;
7456 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
7457 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
7458 } else if (is_qla8032(ha) || is_qla8042(ha)) {
7459 ha->isp_ops = &qla4_83xx_isp_ops;
7460 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
7461 } else {
7462 ha->isp_ops = &qla4xxx_isp_ops;
7463 }
7464
7465 if (is_qla80XX(ha)) {
7466 rwlock_init(&ha->hw_lock);
7467 ha->pf_bit = ha->func_num << 16;
7468 /* Set EEH reset type to fundamental if required by hba */
7469 pdev->needs_freset = 1;
7470 }
7471
7472 /* Configure PCI I/O space. */
7473 ret = ha->isp_ops->iospace_config(ha);
7474 if (ret)
7475 goto probe_failed_ioconfig;
7476
7477 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
7478 pdev->device, pdev->irq, ha->reg);
7479
7480 qla4xxx_config_dma_addressing(ha);
7481
7482 /* Initialize lists and spinlocks. */
7483 INIT_LIST_HEAD(&ha->free_srb_q);
7484
7485 mutex_init(&ha->mbox_sem);
7486 mutex_init(&ha->chap_sem);
7487 init_completion(&ha->mbx_intr_comp);
7488 init_completion(&ha->disable_acb_comp);
7489
7490 spin_lock_init(&ha->hardware_lock);
7491 spin_lock_init(&ha->work_lock);
7492
7493 /* Initialize work list */
7494 INIT_LIST_HEAD(&ha->work_list);
7495
7496 /* Allocate dma buffers */
7497 if (qla4xxx_mem_alloc(ha)) {
7498 ql4_printk(KERN_WARNING, ha,
7499 "[ERROR] Failed to allocate memory for adapter\n");
7500
7501 ret = -ENOMEM;
7502 goto probe_failed;
7503 }
7504
7505 host->cmd_per_lun = 3;
7506 host->max_channel = 0;
7507 host->max_lun = MAX_LUNS - 1;
7508 host->max_id = MAX_TARGETS;
7509 host->max_cmd_len = IOCB_MAX_CDB_LEN;
7510 host->can_queue = MAX_SRBS ;
7511 host->transportt = qla4xxx_scsi_transport;
7512
7513 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
7514 if (ret) {
7515 ql4_printk(KERN_WARNING, ha,
7516 "%s: scsi_init_shared_tag_map failed\n", __func__);
7517 goto probe_failed;
7518 }
7519
7520 pci_set_drvdata(pdev, ha);
7521
7522 ret = scsi_add_host(host, &pdev->dev);
7523 if (ret)
7524 goto probe_failed;
7525
7526 if (is_qla80XX(ha))
7527 qla4_8xxx_get_flash_info(ha);
7528
7529 if (is_qla8032(ha) || is_qla8042(ha)) {
7530 qla4_83xx_read_reset_template(ha);
7531 /*
7532 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
7533 * If DONRESET_BIT0 is set, drivers should not set dev_state
7534 * to NEED_RESET. But if NEED_RESET is set, drivers should
7535 * should honor the reset.
7536 */
7537 if (ql4xdontresethba == 1)
7538 qla4_83xx_set_idc_dontreset(ha);
7539 }
7540
7541 /*
7542 * Initialize the Host adapter request/response queues and
7543 * firmware
7544 * NOTE: interrupts enabled upon successful completion
7545 */
7546 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7547
7548 /* Dont retry adapter initialization if IRQ allocation failed */
7549 if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7550 ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7551 __func__);
7552 goto skip_retry_init;
7553 }
7554
7555 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
7556 init_retry_count++ < MAX_INIT_RETRIES) {
7557
7558 if (is_qla80XX(ha)) {
7559 ha->isp_ops->idc_lock(ha);
7560 dev_state = qla4_8xxx_rd_direct(ha,
7561 QLA8XXX_CRB_DEV_STATE);
7562 ha->isp_ops->idc_unlock(ha);
7563 if (dev_state == QLA8XXX_DEV_FAILED) {
7564 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
7565 "initialize adapter. H/W is in failed state\n",
7566 __func__);
7567 break;
7568 }
7569 }
7570 DEBUG2(printk("scsi: %s: retrying adapter initialization "
7571 "(%d)\n", __func__, init_retry_count));
7572
7573 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
7574 continue;
7575
7576 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7577 }
7578
7579 skip_retry_init:
7580 if (!test_bit(AF_ONLINE, &ha->flags)) {
7581 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7582
7583 if ((is_qla8022(ha) && ql4xdontresethba) ||
7584 ((is_qla8032(ha) || is_qla8042(ha)) &&
7585 qla4_83xx_idc_dontreset(ha))) {
7586 /* Put the device in failed state. */
7587 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7588 ha->isp_ops->idc_lock(ha);
7589 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7590 QLA8XXX_DEV_FAILED);
7591 ha->isp_ops->idc_unlock(ha);
7592 }
7593 ret = -ENODEV;
7594 goto remove_host;
7595 }
7596
7597 /* Startup the kernel thread for this host adapter. */
7598 DEBUG2(printk("scsi: %s: Starting kernel thread for "
7599 "qla4xxx_dpc\n", __func__));
7600 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
7601 ha->dpc_thread = create_singlethread_workqueue(buf);
7602 if (!ha->dpc_thread) {
7603 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
7604 ret = -ENODEV;
7605 goto remove_host;
7606 }
7607 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
7608
7609 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
7610 ha->host_no);
7611 if (!ha->task_wq) {
7612 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
7613 ret = -ENODEV;
7614 goto remove_host;
7615 }
7616
7617 /*
7618 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
7619 * (which is called indirectly by qla4xxx_initialize_adapter),
7620 * so that irqs will be registered after crbinit but before
7621 * mbx_intr_enable.
7622 */
7623 if (is_qla40XX(ha)) {
7624 ret = qla4xxx_request_irqs(ha);
7625 if (ret) {
7626 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
7627 "interrupt %d already in use.\n", pdev->irq);
7628 goto remove_host;
7629 }
7630 }
7631
7632 pci_save_state(ha->pdev);
7633 ha->isp_ops->enable_intrs(ha);
7634
7635 /* Start timer thread. */
7636 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
7637
7638 set_bit(AF_INIT_DONE, &ha->flags);
7639
7640 qla4_8xxx_alloc_sysfs_attr(ha);
7641
7642 printk(KERN_INFO
7643 " QLogic iSCSI HBA Driver version: %s\n"
7644 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7645 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7646 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
7647 ha->fw_info.fw_patch, ha->fw_info.fw_build);
7648
7649 /* Set the driver version */
7650 if (is_qla80XX(ha))
7651 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
7652
7653 if (qla4xxx_setup_boot_info(ha))
7654 ql4_printk(KERN_ERR, ha,
7655 "%s: No iSCSI boot target configured\n", __func__);
7656
7657 if (qla4xxx_sysfs_ddb_export(ha))
7658 ql4_printk(KERN_ERR, ha,
7659 "%s: Error exporting ddb to sysfs\n", __func__);
7660
7661 /* Perform the build ddb list and login to each */
7662 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
7663 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
7664 qla4xxx_wait_login_resp_boot_tgt(ha);
7665
7666 qla4xxx_create_chap_list(ha);
7667
7668 qla4xxx_create_ifaces(ha);
7669 return 0;
7670
7671 remove_host:
7672 scsi_remove_host(ha->host);
7673
7674 probe_failed:
7675 qla4xxx_free_adapter(ha);
7676
7677 probe_failed_ioconfig:
7678 pci_disable_pcie_error_reporting(pdev);
7679 scsi_host_put(ha->host);
7680
7681 probe_disable_device:
7682 pci_disable_device(pdev);
7683
7684 return ret;
7685 }
7686
7687 /**
7688 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
7689 * @ha: pointer to adapter structure
7690 *
7691 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
7692 * so that the other port will not re-initialize while in the process of
7693 * removing the ha due to driver unload or hba hotplug.
7694 **/
7695 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
7696 {
7697 struct scsi_qla_host *other_ha = NULL;
7698 struct pci_dev *other_pdev = NULL;
7699 int fn = ISP4XXX_PCI_FN_2;
7700
7701 /*iscsi function numbers for ISP4xxx is 1 and 3*/
7702 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
7703 fn = ISP4XXX_PCI_FN_1;
7704
7705 other_pdev =
7706 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7707 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7708 fn));
7709
7710 /* Get other_ha if other_pdev is valid and state is enable*/
7711 if (other_pdev) {
7712 if (atomic_read(&other_pdev->enable_cnt)) {
7713 other_ha = pci_get_drvdata(other_pdev);
7714 if (other_ha) {
7715 set_bit(AF_HA_REMOVAL, &other_ha->flags);
7716 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
7717 "Prevent %s reinit\n", __func__,
7718 dev_name(&other_ha->pdev->dev)));
7719 }
7720 }
7721 pci_dev_put(other_pdev);
7722 }
7723 }
7724
/**
 * qla4xxx_destroy_fw_ddb_session - logout and tear down all flash DDB
 * sessions
 * @ha: pointer to adapter structure
 *
 * For every firmware DDB of type FLASH_DDB: log the session out, clear
 * the firmware DDB entry, then destroy the endpoint, driver DDB and
 * iSCSI session objects.  Used on adapter removal.
 **/
static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
	struct ddb_entry *ddb_entry;
	int options;
	int idx;

	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {

		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if ((ddb_entry != NULL) &&
		    (ddb_entry->ddb_type == FLASH_DDB)) {

			options = LOGOUT_OPTION_CLOSE_SESSION;
			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
			    == QLA_ERROR)
				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
					   __func__);

			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
			/*
			 * we have decremented the reference count of the driver
			 * when we setup the session to have the driver unload
			 * to be seamless without actually destroying the
			 * session
			 **/
			try_module_get(qla4xxx_iscsi_transport.owner);
			iscsi_destroy_endpoint(ddb_entry->conn->ep);
			/* NOTE(review): ddb_entry->sess is dereferenced after
			 * qla4xxx_free_ddb() below — confirm free_ddb only
			 * unmaps the entry and does not free ddb_entry. */
			qla4xxx_free_ddb(ha, ddb_entry);
			iscsi_session_teardown(ddb_entry->sess);
		}
	}
}
7757 /**
7758 * qla4xxx_remove_adapter - callback function to remove adapter.
7759 * @pci_dev: PCI device pointer
7760 **/
7761 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
7762 {
7763 struct scsi_qla_host *ha;
7764
7765 /*
7766 * If the PCI device is disabled then it means probe_adapter had
7767 * failed and resources already cleaned up on probe_adapter exit.
7768 */
7769 if (!pci_is_enabled(pdev))
7770 return;
7771
7772 ha = pci_get_drvdata(pdev);
7773
7774 if (is_qla40XX(ha))
7775 qla4xxx_prevent_other_port_reinit(ha);
7776
7777 /* destroy iface from sysfs */
7778 qla4xxx_destroy_ifaces(ha);
7779
7780 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
7781 iscsi_boot_destroy_kset(ha->boot_kset);
7782
7783 qla4xxx_destroy_fw_ddb_session(ha);
7784 qla4_8xxx_free_sysfs_attr(ha);
7785
7786 qla4xxx_sysfs_ddb_remove(ha);
7787 scsi_remove_host(ha->host);
7788
7789 qla4xxx_free_adapter(ha);
7790
7791 scsi_host_put(ha->host);
7792
7793 pci_disable_pcie_error_reporting(pdev);
7794 pci_disable_device(pdev);
7795 pci_set_drvdata(pdev, NULL);
7796 }
7797
7798 /**
7799 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
7800 * @ha: HA context
7801 *
7802 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
7803 * supported addressing method.
7804 */
7805 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
7806 {
7807 int retval;
7808
7809 /* Update our PCI device dma_mask for full 64 bit mask */
7810 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
7811 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7812 dev_dbg(&ha->pdev->dev,
7813 "Failed to set 64 bit PCI consistent mask; "
7814 "using 32 bit.\n");
7815 retval = pci_set_consistent_dma_mask(ha->pdev,
7816 DMA_BIT_MASK(32));
7817 }
7818 } else
7819 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
7820 }
7821
7822 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
7823 {
7824 struct iscsi_cls_session *cls_sess;
7825 struct iscsi_session *sess;
7826 struct ddb_entry *ddb;
7827 int queue_depth = QL4_DEF_QDEPTH;
7828
7829 cls_sess = starget_to_session(sdev->sdev_target);
7830 sess = cls_sess->dd_data;
7831 ddb = sess->dd_data;
7832
7833 sdev->hostdata = ddb;
7834 sdev->tagged_supported = 1;
7835
7836 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
7837 queue_depth = ql4xmaxqdepth;
7838
7839 scsi_activate_tcq(sdev, queue_depth);
7840 return 0;
7841 }
7842
/* qla4xxx_slave_configure - mark every qla4xxx device as supporting
 * tagged command queueing. */
static int qla4xxx_slave_configure(struct scsi_device *sdev)
{
	sdev->tagged_supported = 1;
	return 0;
}
7848
/* qla4xxx_slave_destroy - tear down tagged queueing state (depth 1) for
 * a departing SCSI device. */
static void qla4xxx_slave_destroy(struct scsi_device *sdev)
{
	scsi_deactivate_tcq(sdev, 1);
}
7853
7854 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
7855 int reason)
7856 {
7857 if (!ql4xqfulltracking)
7858 return -EOPNOTSUPP;
7859
7860 return iscsi_change_queue_depth(sdev, qdepth, reason);
7861 }
7862
7863 /**
7864 * qla4xxx_del_from_active_array - returns an active srb
7865 * @ha: Pointer to host adapter structure.
7866 * @index: index into the active_array
7867 *
7868 * This routine removes and returns the srb at the specified index
7869 **/
7870 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
7871 uint32_t index)
7872 {
7873 struct srb *srb = NULL;
7874 struct scsi_cmnd *cmd = NULL;
7875
7876 cmd = scsi_host_find_tag(ha->host, index);
7877 if (!cmd)
7878 return srb;
7879
7880 srb = (struct srb *)CMD_SP(cmd);
7881 if (!srb)
7882 return srb;
7883
7884 /* update counters */
7885 if (srb->flags & SRB_DMA_VALID) {
7886 ha->iocb_cnt -= srb->iocb_cnt;
7887 if (srb->cmd)
7888 srb->cmd->host_scribble =
7889 (unsigned char *)(unsigned long) MAX_SRBS;
7890 }
7891 return srb;
7892 }
7893
7894 /**
7895 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
7896 * @ha: Pointer to host adapter structure.
7897 * @cmd: Scsi Command to wait on.
7898 *
7899 * This routine waits for the command to be returned by the Firmware
7900 * for some max time.
7901 **/
7902 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
7903 struct scsi_cmnd *cmd)
7904 {
7905 int done = 0;
7906 struct srb *rp;
7907 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
7908 int ret = SUCCESS;
7909
7910 /* Dont wait on command if PCI error is being handled
7911 * by PCI AER driver
7912 */
7913 if (unlikely(pci_channel_offline(ha->pdev)) ||
7914 (test_bit(AF_EEH_BUSY, &ha->flags))) {
7915 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
7916 ha->host_no, __func__);
7917 return ret;
7918 }
7919
7920 do {
7921 /* Checking to see if its returned to OS */
7922 rp = (struct srb *) CMD_SP(cmd);
7923 if (rp == NULL) {
7924 done++;
7925 break;
7926 }
7927
7928 msleep(2000);
7929 } while (max_wait_time--);
7930
7931 return done;
7932 }
7933
7934 /**
7935 * qla4xxx_wait_for_hba_online - waits for HBA to come online
7936 * @ha: Pointer to host adapter structure
7937 **/
7938 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
7939 {
7940 unsigned long wait_online;
7941
7942 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
7943 while (time_before(jiffies, wait_online)) {
7944
7945 if (adapter_up(ha))
7946 return QLA_SUCCESS;
7947
7948 msleep(2000);
7949 }
7950
7951 return QLA_ERROR;
7952 }
7953
7954 /**
7955 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
7956 * @ha: pointer to HBA
7957 * @t: target id
7958 * @l: lun id
7959 *
7960 * This function waits for all outstanding commands to a lun to complete. It
7961 * returns 0 if all pending commands are returned and 1 otherwise.
7962 **/
7963 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
7964 struct scsi_target *stgt,
7965 struct scsi_device *sdev)
7966 {
7967 int cnt;
7968 int status = 0;
7969 struct scsi_cmnd *cmd;
7970
7971 /*
7972 * Waiting for all commands for the designated target or dev
7973 * in the active array
7974 */
7975 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
7976 cmd = scsi_host_find_tag(ha->host, cnt);
7977 if (cmd && stgt == scsi_target(cmd->device) &&
7978 (!sdev || sdev == cmd->device)) {
7979 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7980 status++;
7981 break;
7982 }
7983 }
7984 }
7985 return status;
7986 }
7987
7988 /**
7989 * qla4xxx_eh_abort - callback for abort task.
7990 * @cmd: Pointer to Linux's SCSI command structure
7991 *
7992 * This routine is called by the Linux OS to abort the specified
7993 * command.
7994 **/
7995 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
7996 {
7997 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7998 unsigned int id = cmd->device->id;
7999 unsigned int lun = cmd->device->lun;
8000 unsigned long flags;
8001 struct srb *srb = NULL;
8002 int ret = SUCCESS;
8003 int wait = 0;
8004
8005 ql4_printk(KERN_INFO, ha,
8006 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
8007 ha->host_no, id, lun, cmd);
8008
8009 spin_lock_irqsave(&ha->hardware_lock, flags);
8010 srb = (struct srb *) CMD_SP(cmd);
8011 if (!srb) {
8012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
8013 return SUCCESS;
8014 }
8015 kref_get(&srb->srb_ref);
8016 spin_unlock_irqrestore(&ha->hardware_lock, flags);
8017
8018 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
8019 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
8020 ha->host_no, id, lun));
8021 ret = FAILED;
8022 } else {
8023 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
8024 ha->host_no, id, lun));
8025 wait = 1;
8026 }
8027
8028 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
8029
8030 /* Wait for command to complete */
8031 if (wait) {
8032 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
8033 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
8034 ha->host_no, id, lun));
8035 ret = FAILED;
8036 }
8037 }
8038
8039 ql4_printk(KERN_INFO, ha,
8040 "scsi%ld:%d:%d: Abort command - %s\n",
8041 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
8042
8043 return ret;
8044 }
8045
8046 /**
8047 * qla4xxx_eh_device_reset - callback for target reset.
8048 * @cmd: Pointer to Linux's SCSI command structure
8049 *
8050 * This routine is called by the Linux OS to reset all luns on the
8051 * specified target.
8052 **/
8053 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
8054 {
8055 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
8056 struct ddb_entry *ddb_entry = cmd->device->hostdata;
8057 int ret = FAILED, stat;
8058
8059 if (!ddb_entry)
8060 return ret;
8061
8062 ret = iscsi_block_scsi_eh(cmd);
8063 if (ret)
8064 return ret;
8065 ret = FAILED;
8066
8067 ql4_printk(KERN_INFO, ha,
8068 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
8069 cmd->device->channel, cmd->device->id, cmd->device->lun);
8070
8071 DEBUG2(printk(KERN_INFO
8072 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
8073 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
8074 cmd, jiffies, cmd->request->timeout / HZ,
8075 ha->dpc_flags, cmd->result, cmd->allowed));
8076
8077 /* FIXME: wait for hba to go online */
8078 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
8079 if (stat != QLA_SUCCESS) {
8080 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
8081 goto eh_dev_reset_done;
8082 }
8083
8084 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
8085 cmd->device)) {
8086 ql4_printk(KERN_INFO, ha,
8087 "DEVICE RESET FAILED - waiting for "
8088 "commands.\n");
8089 goto eh_dev_reset_done;
8090 }
8091
8092 /* Send marker. */
8093 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
8094 MM_LUN_RESET) != QLA_SUCCESS)
8095 goto eh_dev_reset_done;
8096
8097 ql4_printk(KERN_INFO, ha,
8098 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
8099 ha->host_no, cmd->device->channel, cmd->device->id,
8100 cmd->device->lun);
8101
8102 ret = SUCCESS;
8103
8104 eh_dev_reset_done:
8105
8106 return ret;
8107 }
8108
/**
 * qla4xxx_eh_target_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the target: issue a
 * warm target reset, drain all outstanding commands for the target,
 * then post an MM_TGT_WARM_RESET marker IOCB.  Any step failing
 * returns FAILED.
 **/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int stat, ret;

	/* No iSCSI session (ddb) bound to this device: nothing to reset. */
	if (!ddb_entry)
		return FAILED;

	/* Let the iSCSI transport finish/veto EH first; a non-zero return
	 * is the status EH must propagate as-is. */
	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET ISSUED.\n");

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	/* Step 1: warm target reset mailbox command. */
	stat = qla4xxx_reset_target(ha, ddb_entry);
	if (stat != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET RESET FAILED.\n");
		return FAILED;
	}

	/* Step 2: drain every outstanding command for the whole target
	 * (sdev == NULL means "all devices on this starget"). */
	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 NULL)) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "waiting for commands.\n");
		return FAILED;
	}

	/* Step 3: marker IOCB tells the firmware to resume I/O. */
	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_TGT_WARM_RESET) != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "marker iocb failed.\n");
		return FAILED;
	}

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET SUCCEEDED.\n");
	return SUCCESS;
}
8165
8166 /**
8167 * qla4xxx_is_eh_active - check if error handler is running
8168 * @shost: Pointer to SCSI Host struct
8169 *
8170 * This routine finds that if reset host is called in EH
8171 * scenario or from some application like sg_reset
8172 **/
8173 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
8174 {
8175 if (shost->shost_state == SHOST_RECOVERY)
8176 return 1;
8177 return 0;
8178 }
8179
/**
 * qla4xxx_eh_host_reset - kernel callback
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is invoked by the Linux kernel to perform fatal error
 * recovery on the specified adapter.  It schedules the appropriate
 * reset (firmware-context reset on ISP8xxx, full chip reset otherwise)
 * and runs adapter recovery, honoring the ql4xdontresethba knob and
 * the inter-driver IDC "don't reset" bit on ISP8324/ISP8042.
 **/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	int return_status = FAILED;
	struct scsi_qla_host *ha;

	ha = to_qla_host(cmd->device->host);

	/* Record the module-parameter veto in the IDC control register so
	 * other functions/drivers on ISP8324/8042 see it too. */
	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
		qla4_83xx_set_idc_dontreset(ha);

	/*
	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
	 * protocol drivers, we should not set device_state to NEED_RESET
	 */
	if (ql4xdontresethba ||
	    ((is_qla8032(ha) || is_qla8042(ha)) &&
	     qla4_83xx_idc_dontreset(ha))) {
		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
			      ha->host_no, __func__));

		/* Clear outstanding srb in queues */
		if (qla4xxx_is_eh_active(cmd->device->host))
			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);

		return FAILED;
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
			      "DEAD.\n", ha->host_no, cmd->device->channel,
			      __func__));

		return FAILED;
	}

	/* Ask the DPC thread for the right flavor of reset: ISP8xxx parts
	 * take a firmware-context reset, others a full HA reset. */
	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		if (is_qla80XX(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}

	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
		return_status = SUCCESS;

	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
		   return_status == FAILED ? "FAILED" : "SUCCEEDED");

	return return_status;
}
8241
8242 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
8243 {
8244 uint32_t mbox_cmd[MBOX_REG_COUNT];
8245 uint32_t mbox_sts[MBOX_REG_COUNT];
8246 struct addr_ctrl_blk_def *acb = NULL;
8247 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
8248 int rval = QLA_SUCCESS;
8249 dma_addr_t acb_dma;
8250
8251 acb = dma_alloc_coherent(&ha->pdev->dev,
8252 sizeof(struct addr_ctrl_blk_def),
8253 &acb_dma, GFP_KERNEL);
8254 if (!acb) {
8255 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
8256 __func__);
8257 rval = -ENOMEM;
8258 goto exit_port_reset;
8259 }
8260
8261 memset(acb, 0, acb_len);
8262
8263 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
8264 if (rval != QLA_SUCCESS) {
8265 rval = -EIO;
8266 goto exit_free_acb;
8267 }
8268
8269 rval = qla4xxx_disable_acb(ha);
8270 if (rval != QLA_SUCCESS) {
8271 rval = -EIO;
8272 goto exit_free_acb;
8273 }
8274
8275 wait_for_completion_timeout(&ha->disable_acb_comp,
8276 DISABLE_ACB_TOV * HZ);
8277
8278 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
8279 if (rval != QLA_SUCCESS) {
8280 rval = -EIO;
8281 goto exit_free_acb;
8282 }
8283
8284 exit_free_acb:
8285 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
8286 acb, acb_dma);
8287 exit_port_reset:
8288 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
8289 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
8290 return rval;
8291 }
8292
/**
 * qla4xxx_host_reset - iSCSI transport host-reset entry point.
 * @shost: SCSI host to reset.
 * @reset_type: SCSI_ADAPTER_RESET (full HBA) or SCSI_FIRMWARE_RESET.
 *
 * Returns 0 (QLA_SUCCESS) on success or a negative errno.  A firmware
 * reset on non-8xxx parts is handled synchronously via
 * qla4xxx_context_reset(); everything else funnels into
 * qla4xxx_recover_adapter().
 */
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int rval = QLA_SUCCESS;
	uint32_t idc_ctrl;

	if (ql4xdontresethba) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
				  __func__));
		rval = -EPERM;
		goto exit_host_reset;
	}

	/* A full reset is already pending - just drive recovery. */
	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		goto recover_adapter;

	switch (reset_type) {
	case SCSI_ADAPTER_RESET:
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		break;
	case SCSI_FIRMWARE_RESET:
		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
			if (is_qla80XX(ha))
				/* set firmware context reset */
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else {
				/* Non-8xxx firmware reset is synchronous. */
				rval = qla4xxx_context_reset(ha);
				goto exit_host_reset;
			}
		}
		break;
	}

recover_adapter:
	/* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
	 * reset is issued by application */
	if ((is_qla8032(ha) || is_qla8042(ha)) &&
	    test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
				 (idc_ctrl | GRACEFUL_RESET_BIT1));
	}

	rval = qla4xxx_recover_adapter(ha);
	if (rval != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
				  __func__));
		rval = -EIO;
	}

exit_host_reset:
	return rval;
}
8347
/* PCI AER driver recovers from all correctable errors w/o
 * driver intervention. For uncorrectable errors PCI AER
 * driver calls the following device driver's callbacks
 *
 * - Fatal Errors - link_reset
 * - Non-Fatal Errors - driver's pci_error_detected() which
 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
 *
 * PCI AER driver calls
 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
 * returns RECOVERED or NEED_RESET if fw_hung
 * NEED_RESET - driver's slot_reset()
 * DISCONNECT - device is dead & cannot recover
 * RECOVERED - driver's pci_resume()
 */

/**
 * qla4xxx_pci_error_detected - AER error_detected callback.
 * @pdev: PCI device that hit the error.
 * @state: channel state reported by the AER core.
 *
 * Maps the channel state to a recovery verdict, flagging the adapter
 * busy (AF_EEH_BUSY) and flushing outstanding commands for frozen or
 * permanently-failed channels.
 */
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
		   ha->host_no, __func__, state);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	switch (state) {
	case pci_channel_io_normal:
		clear_bit(AF_EEH_BUSY, &ha->flags);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* MMIO is dead: flush mailbox waiters, drop IRQs, fail all
		 * outstanding I/O with DID_RESET so it gets retried. */
		set_bit(AF_EEH_BUSY, &ha->flags);
		qla4xxx_mailbox_premature_completion(ha);
		qla4xxx_free_irqs(ha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Device is gone for good: fail I/O with DID_NO_CONNECT. */
		set_bit(AF_EEH_BUSY, &ha->flags);
		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* Unknown state: request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8394
8395 /**
8396 * qla4xxx_pci_mmio_enabled() gets called if
8397 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
8398 * and read/write to the device still works.
8399 **/
8400 static pci_ers_result_t
8401 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
8402 {
8403 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8404
8405 if (!is_aer_supported(ha))
8406 return PCI_ERS_RESULT_NONE;
8407
8408 return PCI_ERS_RESULT_RECOVERED;
8409 }
8410
/**
 * qla4_8xxx_error_recovery - recover an ISP8xxx adapter after an EEH event.
 * @ha: Pointer to host adapter structure.
 *
 * Multi-function cards elect a "reset owner": the lowest-numbered
 * enabled PCI function.  The owner drives the full IDC reset
 * (DEV_COLD -> init -> DEV_READY); the other functions only
 * re-initialize their own firmware context once the device state
 * reaches READY.  Returns QLA_SUCCESS or QLA_ERROR.
 */
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
	uint32_t rval = QLA_ERROR;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	/* Take the adapter offline and fail over all sessions before
	 * touching the hardware. */
	if (test_bit(AF_ONLINE, &ha->flags)) {
		clear_bit(AF_ONLINE, &ha->flags);
		clear_bit(AF_LINK_UP, &ha->flags);
		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
	}

	/* Scan lower-numbered functions on the same slot; if any is still
	 * enabled, that function (not us) is the reset owner. */
	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
			   "func %x\n", ha->host_no, __func__, fn);
		/* Get the pci device given the domain, bus,
		 * slot/function number */
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		    fn));

		if (!other_pdev)
			continue;

		if (atomic_read(&other_pdev->enable_cnt)) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
				   "func in enabled state%x\n", ha->host_no,
				   __func__, fn);
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	/* The first function on the card, the reset owner will
	 * start & initialize the firmware. The other functions
	 * on the card will reset the firmware context
	 */
	if (!fn) {
		/* Reset owner: force device state to COLD under the IDC
		 * lock, then bring the firmware all the way back up. */
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
			   "0x%x is the owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);

		ha->isp_ops->idc_lock(ha);
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_COLD);
		ha->isp_ops->idc_unlock(ha);

		rval = qla4_8xxx_update_idc_reg(ha);
		if (rval == QLA_ERROR) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
				   ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
			goto exit_error_recovery;
		}

		clear_bit(AF_FW_RECOVERY, &ha->flags);
		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);

		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "FAILED\n", ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_clear_drv_active(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
		} else {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "READY\n", ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_READY);
			/* Clear driver state register */
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
			qla4_8xxx_set_drv_active(ha);
			ha->isp_ops->idc_unlock(ha);
			ha->isp_ops->enable_intrs(ha);
		}
	} else {
		/* Not the owner: wait until the owner has driven the device
		 * to READY, then only re-init our own firmware context. */
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
			   "the reset owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);
		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
		     QLA8XXX_DEV_READY)) {
			clear_bit(AF_FW_RECOVERY, &ha->flags);
			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
			if (rval == QLA_SUCCESS)
				ha->isp_ops->enable_intrs(ha);

			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_set_drv_active(ha);
			ha->isp_ops->idc_unlock(ha);
		}
	}
exit_error_recovery:
	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	return rval;
}
8521
8522 static pci_ers_result_t
8523 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
8524 {
8525 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
8526 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8527 int rc;
8528
8529 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
8530 ha->host_no, __func__);
8531
8532 if (!is_aer_supported(ha))
8533 return PCI_ERS_RESULT_NONE;
8534
8535 /* Restore the saved state of PCIe device -
8536 * BAR registers, PCI Config space, PCIX, MSI,
8537 * IOV states
8538 */
8539 pci_restore_state(pdev);
8540
8541 /* pci_restore_state() clears the saved_state flag of the device
8542 * save restored state which resets saved_state flag
8543 */
8544 pci_save_state(pdev);
8545
8546 /* Initialize device or resume if in suspended state */
8547 rc = pci_enable_device(pdev);
8548 if (rc) {
8549 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
8550 "device after reset\n", ha->host_no, __func__);
8551 goto exit_slot_reset;
8552 }
8553
8554 ha->isp_ops->disable_intrs(ha);
8555
8556 if (is_qla80XX(ha)) {
8557 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
8558 ret = PCI_ERS_RESULT_RECOVERED;
8559 goto exit_slot_reset;
8560 } else
8561 goto exit_slot_reset;
8562 }
8563
8564 exit_slot_reset:
8565 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
8566 "device after reset\n", ha->host_no, __func__, ret);
8567 return ret;
8568 }
8569
8570 static void
8571 qla4xxx_pci_resume(struct pci_dev *pdev)
8572 {
8573 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8574 int ret;
8575
8576 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
8577 ha->host_no, __func__);
8578
8579 ret = qla4xxx_wait_for_hba_online(ha);
8580 if (ret != QLA_SUCCESS) {
8581 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
8582 "resume I/O from slot/link_reset\n", ha->host_no,
8583 __func__);
8584 }
8585
8586 pci_cleanup_aer_uncorrect_error_status(pdev);
8587 clear_bit(AF_EEH_BUSY, &ha->flags);
8588 }
8589
/* PCI AER recovery callbacks (see the flow description above
 * qla4xxx_pci_error_detected). */
static const struct pci_error_handlers qla4xxx_err_handler = {
	.error_detected = qla4xxx_pci_error_detected,
	.mmio_enabled = qla4xxx_pci_mmio_enabled,
	.slot_reset = qla4xxx_pci_slot_reset,
	.resume = qla4xxx_pci_resume,
};
8596
8597 static struct pci_device_id qla4xxx_pci_tbl[] = {
8598 {
8599 .vendor = PCI_VENDOR_ID_QLOGIC,
8600 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
8601 .subvendor = PCI_ANY_ID,
8602 .subdevice = PCI_ANY_ID,
8603 },
8604 {
8605 .vendor = PCI_VENDOR_ID_QLOGIC,
8606 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
8607 .subvendor = PCI_ANY_ID,
8608 .subdevice = PCI_ANY_ID,
8609 },
8610 {
8611 .vendor = PCI_VENDOR_ID_QLOGIC,
8612 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
8613 .subvendor = PCI_ANY_ID,
8614 .subdevice = PCI_ANY_ID,
8615 },
8616 {
8617 .vendor = PCI_VENDOR_ID_QLOGIC,
8618 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
8619 .subvendor = PCI_ANY_ID,
8620 .subdevice = PCI_ANY_ID,
8621 },
8622 {
8623 .vendor = PCI_VENDOR_ID_QLOGIC,
8624 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
8625 .subvendor = PCI_ANY_ID,
8626 .subdevice = PCI_ANY_ID,
8627 },
8628 {
8629 .vendor = PCI_VENDOR_ID_QLOGIC,
8630 .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
8631 .subvendor = PCI_ANY_ID,
8632 .subdevice = PCI_ANY_ID,
8633 },
8634 {0, 0},
8635 };
8636 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
8637
/* PCI driver registration: probe/remove plus AER error handlers. */
static struct pci_driver qla4xxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= qla4xxx_pci_tbl,
	.probe		= qla4xxx_probe_adapter,
	.remove		= qla4xxx_remove_adapter,
	.err_handler = &qla4xxx_err_handler,
};
8645
8646 static int __init qla4xxx_module_init(void)
8647 {
8648 int ret;
8649
8650 /* Allocate cache for SRBs. */
8651 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
8652 SLAB_HWCACHE_ALIGN, NULL);
8653 if (srb_cachep == NULL) {
8654 printk(KERN_ERR
8655 "%s: Unable to allocate SRB cache..."
8656 "Failing load!\n", DRIVER_NAME);
8657 ret = -ENOMEM;
8658 goto no_srp_cache;
8659 }
8660
8661 /* Derive version string. */
8662 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
8663 if (ql4xextended_error_logging)
8664 strcat(qla4xxx_version_str, "-debug");
8665
8666 qla4xxx_scsi_transport =
8667 iscsi_register_transport(&qla4xxx_iscsi_transport);
8668 if (!qla4xxx_scsi_transport){
8669 ret = -ENODEV;
8670 goto release_srb_cache;
8671 }
8672
8673 ret = pci_register_driver(&qla4xxx_pci_driver);
8674 if (ret)
8675 goto unregister_transport;
8676
8677 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
8678 return 0;
8679
8680 unregister_transport:
8681 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8682 release_srb_cache:
8683 kmem_cache_destroy(srb_cachep);
8684 no_srp_cache:
8685 return ret;
8686 }
8687
/**
 * qla4xxx_module_exit - Module cleanup.
 *
 * Tears down in the reverse order of qla4xxx_module_init: PCI driver
 * first (quiesces all adapters), then the iSCSI transport, then the
 * SRB cache.
 **/
static void __exit qla4xxx_module_exit(void)
{
	pci_unregister_driver(&qla4xxx_pci_driver);
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
	kmem_cache_destroy(srb_cachep);
}
8694
/* Module entry/exit points and modinfo metadata. */
module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);