[SCSI] qla4xxx: correctly update session discovery_parent_idx.
drivers/scsi/qla4xxx/ql4_os.c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22
23 /*
24 * Driver version
25 */
26 static char qla4xxx_version_str[40];
27
28 /*
29 * SRB allocation cache
30 */
31 static struct kmem_cache *srb_cachep;
32
33 /*
34 * Module parameter information and variables
35 */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 " Set to disable exporting boot targets to sysfs.\n"
40 "\t\t 0 - Export boot targets\n"
41 "\t\t 1 - Do not export boot targets (Default)");
42
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 " Don't reset the HBA for driver recovery.\n"
47 "\t\t 0 - It will reset HBA (Default)\n"
48 "\t\t 1 - It will NOT reset HBA");
49
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 " Option to enable extended error logging.\n"
54 "\t\t 0 - no logging (Default)\n"
55 "\t\t 2 - debug logging");
56
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 "\t\t 0 = enable INTx interrupt mechanism.\n"
62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
63 "\t\t 2 = enable MSI interrupt mechanism.");
64
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 " Maximum queue depth to report for target devices.\n"
70 "\t\t Default: 32.");
71
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 " Enable or disable dynamic tracking and adjustment of\n"
76 "\t\t scsi device queue depth.\n"
77 "\t\t 0 - Disable.\n"
78 "\t\t 1 - Enable. (Default)");
79
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec.");
85
86 int ql4xmdcapmask = 0x1F;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0x1F.\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
92
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 " Set to enable minidump.\n"
97 "\t\t 0 - disable minidump\n"
98 "\t\t 1 - enable minidump (Default)");
99
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102 * SCSI host template entry points
103 */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105
106 /*
107 * iSCSI template entry points
108 */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 enum iscsi_param_type param_type,
119 int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 struct sockaddr *dst_addr,
123 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 struct iscsi_cls_conn *cls_conn,
133 uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 uint32_t iface_type, uint32_t payload_size,
148 uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152
153 /*
154 * SCSI host template entry points
155 */
156 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
161 static int qla4xxx_slave_alloc(struct scsi_device *device);
162 static int qla4xxx_slave_configure(struct scsi_device *device);
163 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
164 static umode_t qla4_attr_is_visible(int param_type, int param);
165 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
166 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
167 int reason);
168
169 /*
170 * iSCSI Flash DDB sysfs entry points
171 */
172 static int
173 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
174 struct iscsi_bus_flash_conn *fnode_conn,
175 void *data, int len);
176 static int
177 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
178 int param, char *buf);
179 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
180 int len);
181 static int
182 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
183 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
184 struct iscsi_bus_flash_conn *fnode_conn);
185 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
186 struct iscsi_bus_flash_conn *fnode_conn);
187 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188
189 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
190 QLA82XX_LEGACY_INTR_CONFIG;
191
192 static struct scsi_host_template qla4xxx_driver_template = {
193 .module = THIS_MODULE,
194 .name = DRIVER_NAME,
195 .proc_name = DRIVER_NAME,
196 .queuecommand = qla4xxx_queuecommand,
197
198 .eh_abort_handler = qla4xxx_eh_abort,
199 .eh_device_reset_handler = qla4xxx_eh_device_reset,
200 .eh_target_reset_handler = qla4xxx_eh_target_reset,
201 .eh_host_reset_handler = qla4xxx_eh_host_reset,
202 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
203
204 .slave_configure = qla4xxx_slave_configure,
205 .slave_alloc = qla4xxx_slave_alloc,
206 .slave_destroy = qla4xxx_slave_destroy,
207 .change_queue_depth = qla4xxx_change_queue_depth,
208
209 .this_id = -1,
210 .cmd_per_lun = 3,
211 .use_clustering = ENABLE_CLUSTERING,
212 .sg_tablesize = SG_ALL,
213
214 .max_sectors = 0xFFFF,
215 .shost_attrs = qla4xxx_host_attrs,
216 .host_reset = qla4xxx_host_reset,
217 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
218 };
219
220 static struct iscsi_transport qla4xxx_iscsi_transport = {
221 .owner = THIS_MODULE,
222 .name = DRIVER_NAME,
223 .caps = CAP_TEXT_NEGO |
224 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
225 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
226 CAP_MULTI_R2T,
227 .attr_is_visible = qla4_attr_is_visible,
228 .create_session = qla4xxx_session_create,
229 .destroy_session = qla4xxx_session_destroy,
230 .start_conn = qla4xxx_conn_start,
231 .create_conn = qla4xxx_conn_create,
232 .bind_conn = qla4xxx_conn_bind,
233 .stop_conn = iscsi_conn_stop,
234 .destroy_conn = qla4xxx_conn_destroy,
235 .set_param = iscsi_set_param,
236 .get_conn_param = qla4xxx_conn_get_param,
237 .get_session_param = qla4xxx_session_get_param,
238 .get_ep_param = qla4xxx_get_ep_param,
239 .ep_connect = qla4xxx_ep_connect,
240 .ep_poll = qla4xxx_ep_poll,
241 .ep_disconnect = qla4xxx_ep_disconnect,
242 .get_stats = qla4xxx_conn_get_stats,
243 .send_pdu = iscsi_conn_send_pdu,
244 .xmit_task = qla4xxx_task_xmit,
245 .cleanup_task = qla4xxx_task_cleanup,
246 .alloc_pdu = qla4xxx_alloc_pdu,
247
248 .get_host_param = qla4xxx_host_get_param,
249 .set_iface_param = qla4xxx_iface_set_param,
250 .get_iface_param = qla4xxx_get_iface_param,
251 .bsg_request = qla4xxx_bsg_request,
252 .send_ping = qla4xxx_send_ping,
253 .get_chap = qla4xxx_get_chap_list,
254 .delete_chap = qla4xxx_delete_chap,
255 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
256 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
257 .new_flashnode = qla4xxx_sysfs_ddb_add,
258 .del_flashnode = qla4xxx_sysfs_ddb_delete,
259 .login_flashnode = qla4xxx_sysfs_ddb_login,
260 .logout_flashnode = qla4xxx_sysfs_ddb_logout,
261 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
262 };
263
264 static struct scsi_transport_template *qla4xxx_scsi_transport;
265
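/*
 * qla4xxx_send_ping - issue a firmware ping IOCB to @dst_addr.  For IPv6
 * destinations the link-local source address is tried first and, if that
 * fails, the selected iface's IPv6 address is used.
 */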
266 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
267 uint32_t iface_type, uint32_t payload_size,
268 uint32_t pid, struct sockaddr *dst_addr)
269 {
270 struct scsi_qla_host *ha = to_qla_host(shost);
271 struct sockaddr_in *addr;
272 struct sockaddr_in6 *addr6;
273 uint32_t options = 0;
274 uint8_t ipaddr[IPv6_ADDR_LEN];
275 int rval;
276
277 memset(ipaddr, 0, IPv6_ADDR_LEN);
278 /* IPv4 to IPv4 */
279 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
280 (dst_addr->sa_family == AF_INET)) {
281 addr = (struct sockaddr_in *)dst_addr;
282 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
283 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
284 "dest: %pI4\n", __func__,
285 &ha->ip_config.ip_address, ipaddr));
286 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
287 ipaddr);
288 if (rval)
289 rval = -EINVAL;
290 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
291 (dst_addr->sa_family == AF_INET6)) {
292 /* IPv6 to IPv6 */
293 addr6 = (struct sockaddr_in6 *)dst_addr;
294 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
295
296 options |= PING_IPV6_PROTOCOL_ENABLE;
297
298 /* Ping using LinkLocal address */
299 if ((iface_num == 0) || (iface_num == 1)) {
300 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
301 "src: %pI6 dest: %pI6\n", __func__,
302 &ha->ip_config.ipv6_link_local_addr,
303 ipaddr));
304 options |= PING_IPV6_LINKLOCAL_ADDR;
305 rval = qla4xxx_ping_iocb(ha, options, payload_size,
306 pid, ipaddr);
307 } else {
308 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
309 "not supported\n", __func__, iface_num);
310 rval = -ENOSYS;
311 goto exit_send_ping;
312 }
313
314 /*
315 * If the ping using the LinkLocal address fails, retry the ping
316 * using the iface's IPv6 address
317 */
318 if (rval != QLA_SUCCESS) {
319 options &= ~PING_IPV6_LINKLOCAL_ADDR;
320 if (iface_num == 0) {
321 options |= PING_IPV6_ADDR0;
322 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
323 "Ping src: %pI6 "
324 "dest: %pI6\n", __func__,
325 &ha->ip_config.ipv6_addr0,
326 ipaddr));
327 } else if (iface_num == 1) {
328 options |= PING_IPV6_ADDR1;
329 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
330 "Ping src: %pI6 "
331 "dest: %pI6\n", __func__,
332 &ha->ip_config.ipv6_addr1,
333 ipaddr));
334 }
335 rval = qla4xxx_ping_iocb(ha, options, payload_size,
336 pid, ipaddr);
337 if (rval)
338 rval = -EINVAL;
339 }
340 } else
341 rval = -ENOSYS;
342 exit_send_ping:
343 return rval;
344 }
345
346 static umode_t qla4_attr_is_visible(int param_type, int param)
347 {
348 switch (param_type) {
349 case ISCSI_HOST_PARAM:
350 switch (param) {
351 case ISCSI_HOST_PARAM_HWADDRESS:
352 case ISCSI_HOST_PARAM_IPADDRESS:
353 case ISCSI_HOST_PARAM_INITIATOR_NAME:
354 case ISCSI_HOST_PARAM_PORT_STATE:
355 case ISCSI_HOST_PARAM_PORT_SPEED:
356 return S_IRUGO;
357 default:
358 return 0;
359 }
360 case ISCSI_PARAM:
361 switch (param) {
362 case ISCSI_PARAM_PERSISTENT_ADDRESS:
363 case ISCSI_PARAM_PERSISTENT_PORT:
364 case ISCSI_PARAM_CONN_ADDRESS:
365 case ISCSI_PARAM_CONN_PORT:
366 case ISCSI_PARAM_TARGET_NAME:
367 case ISCSI_PARAM_TPGT:
368 case ISCSI_PARAM_TARGET_ALIAS:
369 case ISCSI_PARAM_MAX_BURST:
370 case ISCSI_PARAM_MAX_R2T:
371 case ISCSI_PARAM_FIRST_BURST:
372 case ISCSI_PARAM_MAX_RECV_DLENGTH:
373 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
374 case ISCSI_PARAM_IFACE_NAME:
375 case ISCSI_PARAM_CHAP_OUT_IDX:
376 case ISCSI_PARAM_CHAP_IN_IDX:
377 case ISCSI_PARAM_USERNAME:
378 case ISCSI_PARAM_PASSWORD:
379 case ISCSI_PARAM_USERNAME_IN:
380 case ISCSI_PARAM_PASSWORD_IN:
381 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
382 case ISCSI_PARAM_DISCOVERY_SESS:
383 case ISCSI_PARAM_PORTAL_TYPE:
384 case ISCSI_PARAM_CHAP_AUTH_EN:
385 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
386 case ISCSI_PARAM_BIDI_CHAP_EN:
387 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
388 case ISCSI_PARAM_DEF_TIME2WAIT:
389 case ISCSI_PARAM_DEF_TIME2RETAIN:
390 case ISCSI_PARAM_HDRDGST_EN:
391 case ISCSI_PARAM_DATADGST_EN:
392 case ISCSI_PARAM_INITIAL_R2T_EN:
393 case ISCSI_PARAM_IMM_DATA_EN:
394 case ISCSI_PARAM_PDU_INORDER_EN:
395 case ISCSI_PARAM_DATASEQ_INORDER_EN:
396 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
397 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
398 case ISCSI_PARAM_TCP_WSF_DISABLE:
399 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
400 case ISCSI_PARAM_TCP_TIMER_SCALE:
401 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
402 case ISCSI_PARAM_TCP_XMIT_WSF:
403 case ISCSI_PARAM_TCP_RECV_WSF:
404 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
405 case ISCSI_PARAM_IPV4_TOS:
406 case ISCSI_PARAM_IPV6_TC:
407 case ISCSI_PARAM_IPV6_FLOW_LABEL:
408 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
409 case ISCSI_PARAM_KEEPALIVE_TMO:
410 case ISCSI_PARAM_LOCAL_PORT:
411 case ISCSI_PARAM_ISID:
412 case ISCSI_PARAM_TSID:
413 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
414 case ISCSI_PARAM_ERL:
415 case ISCSI_PARAM_STATSN:
416 case ISCSI_PARAM_EXP_STATSN:
417 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
418 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
419 return S_IRUGO;
420 default:
421 return 0;
422 }
423 case ISCSI_NET_PARAM:
424 switch (param) {
425 case ISCSI_NET_PARAM_IPV4_ADDR:
426 case ISCSI_NET_PARAM_IPV4_SUBNET:
427 case ISCSI_NET_PARAM_IPV4_GW:
428 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
429 case ISCSI_NET_PARAM_IFACE_ENABLE:
430 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
431 case ISCSI_NET_PARAM_IPV6_ADDR:
432 case ISCSI_NET_PARAM_IPV6_ROUTER:
433 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
434 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
435 case ISCSI_NET_PARAM_VLAN_ID:
436 case ISCSI_NET_PARAM_VLAN_PRIORITY:
437 case ISCSI_NET_PARAM_VLAN_ENABLED:
438 case ISCSI_NET_PARAM_MTU:
439 case ISCSI_NET_PARAM_PORT:
440 return S_IRUGO;
441 default:
442 return 0;
443 }
444 case ISCSI_FLASHNODE_PARAM:
445 switch (param) {
446 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
447 case ISCSI_FLASHNODE_PORTAL_TYPE:
448 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
449 case ISCSI_FLASHNODE_DISCOVERY_SESS:
450 case ISCSI_FLASHNODE_ENTRY_EN:
451 case ISCSI_FLASHNODE_HDR_DGST_EN:
452 case ISCSI_FLASHNODE_DATA_DGST_EN:
453 case ISCSI_FLASHNODE_IMM_DATA_EN:
454 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
455 case ISCSI_FLASHNODE_DATASEQ_INORDER:
456 case ISCSI_FLASHNODE_PDU_INORDER:
457 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
458 case ISCSI_FLASHNODE_SNACK_REQ_EN:
459 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
460 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
461 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
462 case ISCSI_FLASHNODE_ERL:
463 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
464 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
465 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
466 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
467 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
468 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
469 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
470 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
471 case ISCSI_FLASHNODE_FIRST_BURST:
472 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
473 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
474 case ISCSI_FLASHNODE_MAX_R2T:
475 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
476 case ISCSI_FLASHNODE_ISID:
477 case ISCSI_FLASHNODE_TSID:
478 case ISCSI_FLASHNODE_PORT:
479 case ISCSI_FLASHNODE_MAX_BURST:
480 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
481 case ISCSI_FLASHNODE_IPADDR:
482 case ISCSI_FLASHNODE_ALIAS:
483 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
484 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
485 case ISCSI_FLASHNODE_LOCAL_PORT:
486 case ISCSI_FLASHNODE_IPV4_TOS:
487 case ISCSI_FLASHNODE_IPV6_TC:
488 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
489 case ISCSI_FLASHNODE_NAME:
490 case ISCSI_FLASHNODE_TPGT:
491 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
492 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
493 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
494 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
495 case ISCSI_FLASHNODE_TCP_RECV_WSF:
496 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
497 case ISCSI_FLASHNODE_USERNAME:
498 case ISCSI_FLASHNODE_PASSWORD:
499 case ISCSI_FLASHNODE_STATSN:
500 case ISCSI_FLASHNODE_EXP_STATSN:
501 case ISCSI_FLASHNODE_IS_BOOT_TGT:
502 return S_IRUGO;
503 default:
504 return 0;
505 }
506 }
507
508 return 0;
509 }
510
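/*
 * qla4xxx_get_chap_list - copy up to *num_entries valid CHAP records,
 * starting at @chap_tbl_idx, from the cached CHAP table into @buf and
 * report the number actually copied back through *num_entries.
 */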
511 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
512 uint32_t *num_entries, char *buf)
513 {
514 struct scsi_qla_host *ha = to_qla_host(shost);
515 struct ql4_chap_table *chap_table;
516 struct iscsi_chap_rec *chap_rec;
517 int max_chap_entries = 0;
518 int valid_chap_entries = 0;
519 int ret = 0, i;
520
521 if (is_qla80XX(ha))
522 max_chap_entries = (ha->hw.flt_chap_size / 2) /
523 sizeof(struct ql4_chap_table);
524 else
525 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
526
527 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
528 __func__, *num_entries, chap_tbl_idx);
529
530 if (!buf) {
531 ret = -ENOMEM;
532 goto exit_get_chap_list;
533 }
534
535 chap_rec = (struct iscsi_chap_rec *) buf;
536 mutex_lock(&ha->chap_sem);
537 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
538 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
539 if (chap_table->cookie !=
540 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
541 continue;
542
543 chap_rec->chap_tbl_idx = i;
544 strncpy(chap_rec->username, chap_table->name,
545 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
546 strncpy(chap_rec->password, chap_table->secret,
547 QL4_CHAP_MAX_SECRET_LEN);
548 chap_rec->password_length = chap_table->secret_len;
549
550 if (chap_table->flags & BIT_7) /* local */
551 chap_rec->chap_type = CHAP_TYPE_OUT;
552
553 if (chap_table->flags & BIT_6) /* peer */
554 chap_rec->chap_type = CHAP_TYPE_IN;
555
556 chap_rec++;
557
558 valid_chap_entries++;
559 if (valid_chap_entries == *num_entries)
560 break;
561 else
562 continue;
563 }
564 mutex_unlock(&ha->chap_sem);
565
566 exit_get_chap_list:
567 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
568 __func__, valid_chap_entries);
569 *num_entries = valid_chap_entries;
570 return ret;
571 }
572
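/*
 * __qla4xxx_is_chap_active - device_for_each_child() callback; returns 1
 * if an active (logged-in) session is using the CHAP table index passed
 * in via @data, 0 otherwise.
 */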
573 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
574 {
575 int ret = 0;
576 uint16_t *chap_tbl_idx = (uint16_t *) data;
577 struct iscsi_cls_session *cls_session;
578 struct iscsi_session *sess;
579 struct ddb_entry *ddb_entry;
580
581 if (!iscsi_is_session_dev(dev))
582 goto exit_is_chap_active;
583
584 cls_session = iscsi_dev_to_session(dev);
585 sess = cls_session->dd_data;
586 ddb_entry = sess->dd_data;
587
588 if (iscsi_session_chkready(cls_session))
589 goto exit_is_chap_active;
590
591 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
592 ret = 1;
593
594 exit_is_chap_active:
595 return ret;
596 }
597
598 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
599 uint16_t chap_tbl_idx)
600 {
601 int ret = 0;
602
603 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
604 __qla4xxx_is_chap_active);
605
606 return ret;
607 }
608
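/*
 * qla4xxx_delete_chap - invalidate the CHAP entry at @chap_tbl_idx in
 * flash (and in the cached chap_list), unless an active session is
 * still using that entry.
 */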
609 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
610 {
611 struct scsi_qla_host *ha = to_qla_host(shost);
612 struct ql4_chap_table *chap_table;
613 dma_addr_t chap_dma;
614 int max_chap_entries = 0;
615 uint32_t offset = 0;
616 uint32_t chap_size;
617 int ret = 0;
618
619 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
620 if (chap_table == NULL)
621 return -ENOMEM;
622
623 memset(chap_table, 0, sizeof(struct ql4_chap_table));
624
625 if (is_qla80XX(ha))
626 max_chap_entries = (ha->hw.flt_chap_size / 2) /
627 sizeof(struct ql4_chap_table);
628 else
629 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
630
631 	if (chap_tbl_idx >= max_chap_entries) {
632 ret = -EINVAL;
633 goto exit_delete_chap;
634 }
635
636 	/* Check if the chap index is in use.
637 	 * If it is in use, don't delete the chap entry */
638 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
639 if (ret) {
640 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
641 "delete from flash\n", chap_tbl_idx);
642 ret = -EBUSY;
643 goto exit_delete_chap;
644 }
645
646 chap_size = sizeof(struct ql4_chap_table);
647 if (is_qla40XX(ha))
648 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
649 else {
650 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
651 		/* flt_chap_size is the CHAP table size for both ports,
652 		 * so divide it by 2 to calculate the offset for the second port
653 */
654 if (ha->port_num == 1)
655 offset += (ha->hw.flt_chap_size / 2);
656 offset += (chap_tbl_idx * chap_size);
657 }
658
659 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
660 if (ret != QLA_SUCCESS) {
661 ret = -EINVAL;
662 goto exit_delete_chap;
663 }
664
665 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
666 __le16_to_cpu(chap_table->cookie)));
667
668 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
669 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
670 goto exit_delete_chap;
671 }
672
673 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
674
675 offset = FLASH_CHAP_OFFSET |
676 (chap_tbl_idx * sizeof(struct ql4_chap_table));
677 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
678 FLASH_OPT_RMW_COMMIT);
679 if (ret == QLA_SUCCESS && ha->chap_list) {
680 mutex_lock(&ha->chap_sem);
681 /* Update ha chap_list cache */
682 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
683 chap_table, sizeof(struct ql4_chap_table));
684 mutex_unlock(&ha->chap_sem);
685 }
686 if (ret != QLA_SUCCESS)
687 ret = -EINVAL;
688
689 exit_delete_chap:
690 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
691 return ret;
692 }
693
694 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
695 enum iscsi_param_type param_type,
696 int param, char *buf)
697 {
698 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
699 struct scsi_qla_host *ha = to_qla_host(shost);
700 int len = -ENOSYS;
701
702 if (param_type != ISCSI_NET_PARAM)
703 return -ENOSYS;
704
705 switch (param) {
706 case ISCSI_NET_PARAM_IPV4_ADDR:
707 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
708 break;
709 case ISCSI_NET_PARAM_IPV4_SUBNET:
710 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
711 break;
712 case ISCSI_NET_PARAM_IPV4_GW:
713 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
714 break;
715 case ISCSI_NET_PARAM_IFACE_ENABLE:
716 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
717 len = sprintf(buf, "%s\n",
718 (ha->ip_config.ipv4_options &
719 IPOPT_IPV4_PROTOCOL_ENABLE) ?
720 "enabled" : "disabled");
721 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
722 len = sprintf(buf, "%s\n",
723 (ha->ip_config.ipv6_options &
724 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
725 "enabled" : "disabled");
726 break;
727 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
728 len = sprintf(buf, "%s\n",
729 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
730 "dhcp" : "static");
731 break;
732 case ISCSI_NET_PARAM_IPV6_ADDR:
733 if (iface->iface_num == 0)
734 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
735 if (iface->iface_num == 1)
736 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
737 break;
738 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
739 len = sprintf(buf, "%pI6\n",
740 &ha->ip_config.ipv6_link_local_addr);
741 break;
742 case ISCSI_NET_PARAM_IPV6_ROUTER:
743 len = sprintf(buf, "%pI6\n",
744 &ha->ip_config.ipv6_default_router_addr);
745 break;
746 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
747 len = sprintf(buf, "%s\n",
748 (ha->ip_config.ipv6_addl_options &
749 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
750 "nd" : "static");
751 break;
752 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
753 len = sprintf(buf, "%s\n",
754 (ha->ip_config.ipv6_addl_options &
755 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
756 "auto" : "static");
757 break;
758 case ISCSI_NET_PARAM_VLAN_ID:
759 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
760 len = sprintf(buf, "%d\n",
761 (ha->ip_config.ipv4_vlan_tag &
762 ISCSI_MAX_VLAN_ID));
763 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
764 len = sprintf(buf, "%d\n",
765 (ha->ip_config.ipv6_vlan_tag &
766 ISCSI_MAX_VLAN_ID));
767 break;
768 case ISCSI_NET_PARAM_VLAN_PRIORITY:
769 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
770 len = sprintf(buf, "%d\n",
771 ((ha->ip_config.ipv4_vlan_tag >> 13) &
772 ISCSI_MAX_VLAN_PRIORITY));
773 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
774 len = sprintf(buf, "%d\n",
775 ((ha->ip_config.ipv6_vlan_tag >> 13) &
776 ISCSI_MAX_VLAN_PRIORITY));
777 break;
778 case ISCSI_NET_PARAM_VLAN_ENABLED:
779 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
780 len = sprintf(buf, "%s\n",
781 (ha->ip_config.ipv4_options &
782 IPOPT_VLAN_TAGGING_ENABLE) ?
783 "enabled" : "disabled");
784 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
785 len = sprintf(buf, "%s\n",
786 (ha->ip_config.ipv6_options &
787 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
788 "enabled" : "disabled");
789 break;
790 case ISCSI_NET_PARAM_MTU:
791 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
792 break;
793 case ISCSI_NET_PARAM_PORT:
794 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
795 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
796 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
797 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
798 break;
799 default:
800 len = -ENOSYS;
801 }
802
803 return len;
804 }
805
806 static struct iscsi_endpoint *
807 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
808 int non_blocking)
809 {
810 int ret;
811 struct iscsi_endpoint *ep;
812 struct qla_endpoint *qla_ep;
813 struct scsi_qla_host *ha;
814 struct sockaddr_in *addr;
815 struct sockaddr_in6 *addr6;
816
817 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
818 if (!shost) {
819 ret = -ENXIO;
820 printk(KERN_ERR "%s: shost is NULL\n",
821 __func__);
822 return ERR_PTR(ret);
823 }
824
825 ha = iscsi_host_priv(shost);
826
827 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
828 if (!ep) {
829 ret = -ENOMEM;
830 return ERR_PTR(ret);
831 }
832
833 qla_ep = ep->dd_data;
834 memset(qla_ep, 0, sizeof(struct qla_endpoint));
835 if (dst_addr->sa_family == AF_INET) {
836 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
837 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
838 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
839 (char *)&addr->sin_addr));
840 } else if (dst_addr->sa_family == AF_INET6) {
841 memcpy(&qla_ep->dst_addr, dst_addr,
842 sizeof(struct sockaddr_in6));
843 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
844 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
845 (char *)&addr6->sin6_addr));
846 }
847
848 qla_ep->host = shost;
849
850 return ep;
851 }
852
853 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
854 {
855 struct qla_endpoint *qla_ep;
856 struct scsi_qla_host *ha;
857 int ret = 0;
858
859 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
860 qla_ep = ep->dd_data;
861 ha = to_qla_host(qla_ep->host);
862
863 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
864 ret = 1;
865
866 return ret;
867 }
868
869 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
870 {
871 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
872 iscsi_destroy_endpoint(ep);
873 }
874
875 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
876 enum iscsi_param param,
877 char *buf)
878 {
879 struct qla_endpoint *qla_ep = ep->dd_data;
880 struct sockaddr *dst_addr;
881
882 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
883
884 switch (param) {
885 case ISCSI_PARAM_CONN_PORT:
886 case ISCSI_PARAM_CONN_ADDRESS:
887 if (!qla_ep)
888 return -ENOTCONN;
889
890 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
891 if (!dst_addr)
892 return -ENOTCONN;
893
894 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
895 &qla_ep->dst_addr, param, buf);
896 default:
897 return -ENOSYS;
898 }
899 }
900
901 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
902 struct iscsi_stats *stats)
903 {
904 struct iscsi_session *sess;
905 struct iscsi_cls_session *cls_sess;
906 struct ddb_entry *ddb_entry;
907 struct scsi_qla_host *ha;
908 struct ql_iscsi_stats *ql_iscsi_stats;
909 int stats_size;
910 int ret;
911 dma_addr_t iscsi_stats_dma;
912
913 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
914
915 cls_sess = iscsi_conn_to_session(cls_conn);
916 sess = cls_sess->dd_data;
917 ddb_entry = sess->dd_data;
918 ha = ddb_entry->ha;
919
920 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
921 /* Allocate memory */
922 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
923 &iscsi_stats_dma, GFP_KERNEL);
924 if (!ql_iscsi_stats) {
925 ql4_printk(KERN_ERR, ha,
926 "Unable to allocate memory for iscsi stats\n");
927 goto exit_get_stats;
928 }
929
930 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
931 iscsi_stats_dma);
932 if (ret != QLA_SUCCESS) {
933 ql4_printk(KERN_ERR, ha,
934 "Unable to retrieve iscsi stats\n");
935 goto free_stats;
936 }
937
938 /* octets */
939 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
940 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
941 /* xmit pdus */
942 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
943 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
944 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
945 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
946 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
947 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
948 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
949 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
950 /* recv pdus */
951 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
952 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
953 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
954 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
955 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
956 stats->logoutrsp_pdus =
957 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
958 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
959 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
960 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
961
962 free_stats:
963 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
964 iscsi_stats_dma);
965 exit_get_stats:
966 return;
967 }
968
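/*
 * qla4xxx_eh_cmd_timed_out - SCSI command timeout handler.  While the
 * iSCSI session is in the FAILED state, keep resetting the block-layer
 * timer so that session recovery, not command abort, handles the outage.
 */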
969 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
970 {
971 struct iscsi_cls_session *session;
972 struct iscsi_session *sess;
973 unsigned long flags;
974 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
975
976 session = starget_to_session(scsi_target(sc->device));
977 sess = session->dd_data;
978
979 spin_lock_irqsave(&session->lock, flags);
980 if (session->state == ISCSI_SESSION_FAILED)
981 ret = BLK_EH_RESET_TIMER;
982 spin_unlock_irqrestore(&session->lock, flags);
983
984 return ret;
985 }
986
987 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
988 {
989 struct scsi_qla_host *ha = to_qla_host(shost);
990 struct iscsi_cls_host *ihost = shost->shost_data;
991 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
992
993 qla4xxx_get_firmware_state(ha);
994
995 switch (ha->addl_fw_state & 0x0F00) {
996 case FW_ADDSTATE_LINK_SPEED_10MBPS:
997 speed = ISCSI_PORT_SPEED_10MBPS;
998 break;
999 case FW_ADDSTATE_LINK_SPEED_100MBPS:
1000 speed = ISCSI_PORT_SPEED_100MBPS;
1001 break;
1002 case FW_ADDSTATE_LINK_SPEED_1GBPS:
1003 speed = ISCSI_PORT_SPEED_1GBPS;
1004 break;
1005 case FW_ADDSTATE_LINK_SPEED_10GBPS:
1006 speed = ISCSI_PORT_SPEED_10GBPS;
1007 break;
1008 }
1009 ihost->port_speed = speed;
1010 }
1011
1012 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1013 {
1014 struct scsi_qla_host *ha = to_qla_host(shost);
1015 struct iscsi_cls_host *ihost = shost->shost_data;
1016 uint32_t state = ISCSI_PORT_STATE_DOWN;
1017
1018 if (test_bit(AF_LINK_UP, &ha->flags))
1019 state = ISCSI_PORT_STATE_UP;
1020
1021 ihost->port_state = state;
1022 }
1023
1024 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1025 enum iscsi_host_param param, char *buf)
1026 {
1027 struct scsi_qla_host *ha = to_qla_host(shost);
1028 int len;
1029
1030 switch (param) {
1031 case ISCSI_HOST_PARAM_HWADDRESS:
1032 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1033 break;
1034 case ISCSI_HOST_PARAM_IPADDRESS:
1035 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1036 break;
1037 case ISCSI_HOST_PARAM_INITIATOR_NAME:
1038 len = sprintf(buf, "%s\n", ha->name_string);
1039 break;
1040 case ISCSI_HOST_PARAM_PORT_STATE:
1041 qla4xxx_set_port_state(shost);
1042 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1043 break;
1044 case ISCSI_HOST_PARAM_PORT_SPEED:
1045 qla4xxx_set_port_speed(shost);
1046 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1047 break;
1048 default:
1049 return -ENOSYS;
1050 }
1051
1052 return len;
1053 }
1054
1055 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1056 {
1057 if (ha->iface_ipv4)
1058 return;
1059
1060 /* IPv4 */
1061 ha->iface_ipv4 = iscsi_create_iface(ha->host,
1062 &qla4xxx_iscsi_transport,
1063 ISCSI_IFACE_TYPE_IPV4, 0, 0);
1064 if (!ha->iface_ipv4)
1065 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1066 "iface0.\n");
1067 }
1068
1069 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1070 {
1071 if (!ha->iface_ipv6_0)
1072 /* IPv6 iface-0 */
1073 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1074 &qla4xxx_iscsi_transport,
1075 ISCSI_IFACE_TYPE_IPV6, 0,
1076 0);
1077 if (!ha->iface_ipv6_0)
1078 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1079 "iface0.\n");
1080
1081 if (!ha->iface_ipv6_1)
1082 /* IPv6 iface-1 */
1083 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1084 &qla4xxx_iscsi_transport,
1085 ISCSI_IFACE_TYPE_IPV6, 1,
1086 0);
1087 if (!ha->iface_ipv6_1)
1088 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1089 "iface1.\n");
1090 }
1091
1092 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1093 {
1094 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1095 qla4xxx_create_ipv4_iface(ha);
1096
1097 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1098 qla4xxx_create_ipv6_iface(ha);
1099 }
1100
1101 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1102 {
1103 if (ha->iface_ipv4) {
1104 iscsi_destroy_iface(ha->iface_ipv4);
1105 ha->iface_ipv4 = NULL;
1106 }
1107 }
1108
1109 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1110 {
1111 if (ha->iface_ipv6_0) {
1112 iscsi_destroy_iface(ha->iface_ipv6_0);
1113 ha->iface_ipv6_0 = NULL;
1114 }
1115 if (ha->iface_ipv6_1) {
1116 iscsi_destroy_iface(ha->iface_ipv6_1);
1117 ha->iface_ipv6_1 = NULL;
1118 }
1119 }
1120
1121 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1122 {
1123 qla4xxx_destroy_ipv4_iface(ha);
1124 qla4xxx_destroy_ipv6_iface(ha);
1125 }
1126
1127 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1128 struct iscsi_iface_param_info *iface_param,
1129 struct addr_ctrl_blk *init_fw_cb)
1130 {
1131 /*
1132 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1133 * iface_num 1 is valid only for IPv6 Addr.
1134 */
1135 switch (iface_param->param) {
1136 case ISCSI_NET_PARAM_IPV6_ADDR:
1137 if (iface_param->iface_num & 0x1)
1138 /* IPv6 Addr 1 */
1139 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1140 sizeof(init_fw_cb->ipv6_addr1));
1141 else
1142 /* IPv6 Addr 0 */
1143 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1144 sizeof(init_fw_cb->ipv6_addr0));
1145 break;
1146 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1147 if (iface_param->iface_num & 0x1)
1148 break;
1149 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1150 sizeof(init_fw_cb->ipv6_if_id));
1151 break;
1152 case ISCSI_NET_PARAM_IPV6_ROUTER:
1153 if (iface_param->iface_num & 0x1)
1154 break;
1155 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1156 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1157 break;
1158 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1159 /* Autocfg applies to even interface */
1160 if (iface_param->iface_num & 0x1)
1161 break;
1162
1163 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1164 init_fw_cb->ipv6_addtl_opts &=
1165 cpu_to_le16(
1166 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1167 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1168 init_fw_cb->ipv6_addtl_opts |=
1169 cpu_to_le16(
1170 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1171 else
1172 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1173 "IPv6 addr\n");
1174 break;
1175 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1176 /* Autocfg applies to even interface */
1177 if (iface_param->iface_num & 0x1)
1178 break;
1179
1180 if (iface_param->value[0] ==
1181 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1182 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1183 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1184 else if (iface_param->value[0] ==
1185 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1186 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1187 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1188 else
1189 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1190 "IPv6 linklocal addr\n");
1191 break;
1192 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1193 /* Autocfg applies to even interface */
1194 if (iface_param->iface_num & 0x1)
1195 break;
1196
1197 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1198 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1199 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1200 break;
1201 case ISCSI_NET_PARAM_IFACE_ENABLE:
1202 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1203 init_fw_cb->ipv6_opts |=
1204 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1205 qla4xxx_create_ipv6_iface(ha);
1206 } else {
1207 init_fw_cb->ipv6_opts &=
1208 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1209 0xFFFF);
1210 qla4xxx_destroy_ipv6_iface(ha);
1211 }
1212 break;
1213 case ISCSI_NET_PARAM_VLAN_TAG:
1214 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1215 break;
1216 init_fw_cb->ipv6_vlan_tag =
1217 cpu_to_be16(*(uint16_t *)iface_param->value);
1218 break;
1219 case ISCSI_NET_PARAM_VLAN_ENABLED:
1220 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1221 init_fw_cb->ipv6_opts |=
1222 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1223 else
1224 init_fw_cb->ipv6_opts &=
1225 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1226 break;
1227 case ISCSI_NET_PARAM_MTU:
1228 init_fw_cb->eth_mtu_size =
1229 cpu_to_le16(*(uint16_t *)iface_param->value);
1230 break;
1231 case ISCSI_NET_PARAM_PORT:
1232 		/* IPv6 port is common to both ifaces; set it via the even iface only */
1233 if (iface_param->iface_num & 0x1)
1234 break;
1235
1236 init_fw_cb->ipv6_port =
1237 cpu_to_le16(*(uint16_t *)iface_param->value);
1238 break;
1239 default:
1240 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1241 iface_param->param);
1242 break;
1243 }
1244 }
1245
1246 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1247 struct iscsi_iface_param_info *iface_param,
1248 struct addr_ctrl_blk *init_fw_cb)
1249 {
1250 switch (iface_param->param) {
1251 case ISCSI_NET_PARAM_IPV4_ADDR:
1252 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1253 sizeof(init_fw_cb->ipv4_addr));
1254 break;
1255 case ISCSI_NET_PARAM_IPV4_SUBNET:
1256 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
1257 sizeof(init_fw_cb->ipv4_subnet));
1258 break;
1259 case ISCSI_NET_PARAM_IPV4_GW:
1260 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1261 sizeof(init_fw_cb->ipv4_gw_addr));
1262 break;
1263 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1264 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1265 init_fw_cb->ipv4_tcp_opts |=
1266 cpu_to_le16(TCPOPT_DHCP_ENABLE);
1267 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1268 init_fw_cb->ipv4_tcp_opts &=
1269 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1270 else
1271 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1272 break;
1273 case ISCSI_NET_PARAM_IFACE_ENABLE:
1274 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1275 init_fw_cb->ipv4_ip_opts |=
1276 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1277 qla4xxx_create_ipv4_iface(ha);
1278 } else {
1279 init_fw_cb->ipv4_ip_opts &=
1280 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1281 0xFFFF);
1282 qla4xxx_destroy_ipv4_iface(ha);
1283 }
1284 break;
1285 case ISCSI_NET_PARAM_VLAN_TAG:
1286 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1287 break;
1288 init_fw_cb->ipv4_vlan_tag =
1289 cpu_to_be16(*(uint16_t *)iface_param->value);
1290 break;
1291 case ISCSI_NET_PARAM_VLAN_ENABLED:
1292 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1293 init_fw_cb->ipv4_ip_opts |=
1294 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1295 else
1296 init_fw_cb->ipv4_ip_opts &=
1297 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1298 break;
1299 case ISCSI_NET_PARAM_MTU:
1300 init_fw_cb->eth_mtu_size =
1301 cpu_to_le16(*(uint16_t *)iface_param->value);
1302 break;
1303 case ISCSI_NET_PARAM_PORT:
1304 init_fw_cb->ipv4_port =
1305 cpu_to_le16(*(uint16_t *)iface_param->value);
1306 break;
1307 default:
1308 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1309 iface_param->param);
1310 break;
1311 }
1312 }
1313
1314 static void
1315 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1316 {
1317 struct addr_ctrl_blk_def *acb;
1318 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1319 memset(acb->reserved1, 0, sizeof(acb->reserved1));
1320 memset(acb->reserved2, 0, sizeof(acb->reserved2));
1321 memset(acb->reserved3, 0, sizeof(acb->reserved3));
1322 memset(acb->reserved4, 0, sizeof(acb->reserved4));
1323 memset(acb->reserved5, 0, sizeof(acb->reserved5));
1324 memset(acb->reserved6, 0, sizeof(acb->reserved6));
1325 memset(acb->reserved7, 0, sizeof(acb->reserved7));
1326 memset(acb->reserved8, 0, sizeof(acb->reserved8));
1327 memset(acb->reserved9, 0, sizeof(acb->reserved9));
1328 memset(acb->reserved10, 0, sizeof(acb->reserved10));
1329 memset(acb->reserved11, 0, sizeof(acb->reserved11));
1330 memset(acb->reserved12, 0, sizeof(acb->reserved12));
1331 memset(acb->reserved13, 0, sizeof(acb->reserved13));
1332 memset(acb->reserved14, 0, sizeof(acb->reserved14));
1333 memset(acb->reserved15, 0, sizeof(acb->reserved15));
1334 }
1335
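/*
 * qla4xxx_iface_set_param - apply netlink-supplied iface parameters to
 * the firmware's address control block: read the current IFCB, update it
 * per attribute, commit it to flash, then disable and re-apply the ACB
 * so the new settings take effect.
 */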
1336 static int
1337 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1338 {
1339 struct scsi_qla_host *ha = to_qla_host(shost);
1340 int rval = 0;
1341 struct iscsi_iface_param_info *iface_param = NULL;
1342 struct addr_ctrl_blk *init_fw_cb = NULL;
1343 dma_addr_t init_fw_cb_dma;
1344 uint32_t mbox_cmd[MBOX_REG_COUNT];
1345 uint32_t mbox_sts[MBOX_REG_COUNT];
1346 uint32_t rem = len;
1347 struct nlattr *attr;
1348
1349 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1350 sizeof(struct addr_ctrl_blk),
1351 &init_fw_cb_dma, GFP_KERNEL);
1352 if (!init_fw_cb) {
1353 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1354 __func__);
1355 return -ENOMEM;
1356 }
1357
1358 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1359 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1360 memset(&mbox_sts, 0, sizeof(mbox_sts));
1361
1362 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1363 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1364 rval = -EIO;
1365 goto exit_init_fw_cb;
1366 }
1367
1368 nla_for_each_attr(attr, data, len, rem) {
1369 iface_param = nla_data(attr);
1370
1371 if (iface_param->param_type != ISCSI_NET_PARAM)
1372 continue;
1373
1374 switch (iface_param->iface_type) {
1375 case ISCSI_IFACE_TYPE_IPV4:
1376 switch (iface_param->iface_num) {
1377 case 0:
1378 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1379 break;
1380 default:
1381 /* Cannot have more than one IPv4 interface */
1382 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1383 "number = %d\n",
1384 iface_param->iface_num);
1385 break;
1386 }
1387 break;
1388 case ISCSI_IFACE_TYPE_IPV6:
1389 switch (iface_param->iface_num) {
1390 case 0:
1391 case 1:
1392 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1393 break;
1394 default:
1395 				/* Cannot have more than two IPv6 interfaces */
1396 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1397 "number = %d\n",
1398 iface_param->iface_num);
1399 break;
1400 }
1401 break;
1402 default:
1403 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1404 break;
1405 }
1406 }
1407
1408 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1409
1410 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1411 sizeof(struct addr_ctrl_blk),
1412 FLASH_OPT_RMW_COMMIT);
1413 if (rval != QLA_SUCCESS) {
1414 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1415 __func__);
1416 rval = -EIO;
1417 goto exit_init_fw_cb;
1418 }
1419
1420 rval = qla4xxx_disable_acb(ha);
1421 if (rval != QLA_SUCCESS) {
1422 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1423 __func__);
1424 rval = -EIO;
1425 goto exit_init_fw_cb;
1426 }
1427
1428 wait_for_completion_timeout(&ha->disable_acb_comp,
1429 DISABLE_ACB_TOV * HZ);
1430
1431 qla4xxx_initcb_to_acb(init_fw_cb);
1432
1433 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1434 if (rval != QLA_SUCCESS) {
1435 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1436 __func__);
1437 rval = -EIO;
1438 goto exit_init_fw_cb;
1439 }
1440
1441 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1442 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1443 init_fw_cb_dma);
1444
1445 exit_init_fw_cb:
1446 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1447 init_fw_cb, init_fw_cb_dma);
1448
1449 return rval;
1450 }
1451
1452 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1453 enum iscsi_param param, char *buf)
1454 {
1455 struct iscsi_session *sess = cls_sess->dd_data;
1456 struct ddb_entry *ddb_entry = sess->dd_data;
1457 struct scsi_qla_host *ha = ddb_entry->ha;
1458 int rval, len;
1459 uint16_t idx;
1460
1461 switch (param) {
1462 case ISCSI_PARAM_CHAP_IN_IDX:
1463 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1464 sess->password_in, BIDI_CHAP,
1465 &idx);
1466 if (rval)
1467 len = sprintf(buf, "\n");
1468 else
1469 len = sprintf(buf, "%hu\n", idx);
1470 break;
1471 case ISCSI_PARAM_CHAP_OUT_IDX:
1472 rval = qla4xxx_get_chap_index(ha, sess->username,
1473 sess->password, LOCAL_CHAP,
1474 &idx);
1475 if (rval)
1476 len = sprintf(buf, "\n");
1477 else
1478 len = sprintf(buf, "%hu\n", idx);
1479 break;
1480 default:
1481 return iscsi_session_get_param(cls_sess, param, buf);
1482 }
1483
1484 return len;
1485 }
1486
1487 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1488 enum iscsi_param param, char *buf)
1489 {
1490 struct iscsi_conn *conn;
1491 struct qla_conn *qla_conn;
1492 struct sockaddr *dst_addr;
1493 int len = 0;
1494
1495 conn = cls_conn->dd_data;
1496 qla_conn = conn->dd_data;
1497 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1498
1499 switch (param) {
1500 case ISCSI_PARAM_CONN_PORT:
1501 case ISCSI_PARAM_CONN_ADDRESS:
1502 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1503 dst_addr, param, buf);
1504 default:
1505 return iscsi_conn_get_param(cls_conn, param, buf);
1506 }
1507
1508 return len;
1509
1510 }
1511
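/*
 * qla4xxx_get_ddb_index - find a free DDB index in ha->ddb_idx_map and
 * reserve it with the firmware, retrying with the next free index if the
 * firmware rejects it with MBOX_STS_COMMAND_ERROR.
 */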
1512 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1513 {
1514 uint32_t mbx_sts = 0;
1515 uint16_t tmp_ddb_index;
1516 int ret;
1517
1518 get_ddb_index:
1519 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1520
1521 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1522 DEBUG2(ql4_printk(KERN_INFO, ha,
1523 "Free DDB index not available\n"));
1524 ret = QLA_ERROR;
1525 goto exit_get_ddb_index;
1526 }
1527
1528 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1529 goto get_ddb_index;
1530
1531 DEBUG2(ql4_printk(KERN_INFO, ha,
1532 "Found a free DDB index at %d\n", tmp_ddb_index));
1533 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1534 if (ret == QLA_ERROR) {
1535 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1536 ql4_printk(KERN_INFO, ha,
1537 "DDB index = %d not available trying next\n",
1538 tmp_ddb_index);
1539 goto get_ddb_index;
1540 }
1541 DEBUG2(ql4_printk(KERN_INFO, ha,
1542 "Free FW DDB not available\n"));
1543 }
1544
1545 *ddb_index = tmp_ddb_index;
1546
1547 exit_get_ddb_index:
1548 return ret;
1549 }
1550
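/*
 * qla4xxx_match_ipaddress - convert @user_ipaddr to the same printable
 * form as @existing_ipaddr (IPv4 or IPv6, per the DDB options) and
 * compare the two.  Returns QLA_SUCCESS on a match, QLA_ERROR otherwise.
 */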
1551 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1552 struct ddb_entry *ddb_entry,
1553 char *existing_ipaddr,
1554 char *user_ipaddr)
1555 {
1556 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1557 char formatted_ipaddr[DDB_IPADDR_LEN];
1558 int status = QLA_SUCCESS, ret = 0;
1559
1560 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1561 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1562 '\0', NULL);
1563 if (ret == 0) {
1564 status = QLA_ERROR;
1565 goto out_match;
1566 }
1567 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1568 } else {
1569 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1570 '\0', NULL);
1571 if (ret == 0) {
1572 status = QLA_ERROR;
1573 goto out_match;
1574 }
1575 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1576 }
1577
1578 if (strcmp(existing_ipaddr, formatted_ipaddr))
1579 status = QLA_ERROR;
1580
1581 out_match:
1582 return status;
1583 }
1584
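/*
 * qla4xxx_match_fwdb_session - check whether a flash DDB session with
 * the same target name, IP address and port as @cls_conn already exists.
 * Returns QLA_SUCCESS if a match is found, QLA_ERROR otherwise.
 */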
1585 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1586 struct iscsi_cls_conn *cls_conn)
1587 {
1588 int idx = 0, max_ddbs, rval;
1589 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1590 struct iscsi_session *sess, *existing_sess;
1591 struct iscsi_conn *conn, *existing_conn;
1592 struct ddb_entry *ddb_entry;
1593
1594 sess = cls_sess->dd_data;
1595 conn = cls_conn->dd_data;
1596
1597 if (sess->targetname == NULL ||
1598 conn->persistent_address == NULL ||
1599 conn->persistent_port == 0)
1600 return QLA_ERROR;
1601
1602 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1603 MAX_DEV_DB_ENTRIES;
1604
1605 for (idx = 0; idx < max_ddbs; idx++) {
1606 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1607 if (ddb_entry == NULL)
1608 continue;
1609
1610 if (ddb_entry->ddb_type != FLASH_DDB)
1611 continue;
1612
1613 existing_sess = ddb_entry->sess->dd_data;
1614 existing_conn = ddb_entry->conn->dd_data;
1615
1616 if (existing_sess->targetname == NULL ||
1617 existing_conn->persistent_address == NULL ||
1618 existing_conn->persistent_port == 0)
1619 continue;
1620
1621 DEBUG2(ql4_printk(KERN_INFO, ha,
1622 "IQN = %s User IQN = %s\n",
1623 existing_sess->targetname,
1624 sess->targetname));
1625
1626 DEBUG2(ql4_printk(KERN_INFO, ha,
1627 "IP = %s User IP = %s\n",
1628 existing_conn->persistent_address,
1629 conn->persistent_address));
1630
1631 DEBUG2(ql4_printk(KERN_INFO, ha,
1632 "Port = %d User Port = %d\n",
1633 existing_conn->persistent_port,
1634 conn->persistent_port));
1635
1636 if (strcmp(existing_sess->targetname, sess->targetname))
1637 continue;
1638 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1639 existing_conn->persistent_address,
1640 conn->persistent_address);
1641 if (rval == QLA_ERROR)
1642 continue;
1643 if (existing_conn->persistent_port != conn->persistent_port)
1644 continue;
1645 break;
1646 }
1647
1648 if (idx == max_ddbs)
1649 return QLA_ERROR;
1650
1651 DEBUG2(ql4_printk(KERN_INFO, ha,
1652 "Match found in fwdb sessions\n"));
1653 return QLA_SUCCESS;
1654 }
1655
1656 static struct iscsi_cls_session *
1657 qla4xxx_session_create(struct iscsi_endpoint *ep,
1658 uint16_t cmds_max, uint16_t qdepth,
1659 uint32_t initial_cmdsn)
1660 {
1661 struct iscsi_cls_session *cls_sess;
1662 struct scsi_qla_host *ha;
1663 struct qla_endpoint *qla_ep;
1664 struct ddb_entry *ddb_entry;
1665 uint16_t ddb_index;
1666 struct iscsi_session *sess;
1667 struct sockaddr *dst_addr;
1668 int ret;
1669
1670 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1671 if (!ep) {
1672 printk(KERN_ERR "qla4xxx: missing ep.\n");
1673 return NULL;
1674 }
1675
1676 qla_ep = ep->dd_data;
1677 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1678 ha = to_qla_host(qla_ep->host);
1679
1680 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1681 if (ret == QLA_ERROR)
1682 return NULL;
1683
1684 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1685 cmds_max, sizeof(struct ddb_entry),
1686 sizeof(struct ql4_task_data),
1687 initial_cmdsn, ddb_index);
1688 if (!cls_sess)
1689 return NULL;
1690
1691 sess = cls_sess->dd_data;
1692 ddb_entry = sess->dd_data;
1693 ddb_entry->fw_ddb_index = ddb_index;
1694 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1695 ddb_entry->ha = ha;
1696 ddb_entry->sess = cls_sess;
1697 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1698 ddb_entry->ddb_change = qla4xxx_ddb_change;
1699 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1700 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1701 ha->tot_ddbs++;
1702
1703 return cls_sess;
1704 }
1705
1706 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1707 {
1708 struct iscsi_session *sess;
1709 struct ddb_entry *ddb_entry;
1710 struct scsi_qla_host *ha;
1711 unsigned long flags, wtime;
1712 struct dev_db_entry *fw_ddb_entry = NULL;
1713 dma_addr_t fw_ddb_entry_dma;
1714 uint32_t ddb_state;
1715 int ret;
1716
1717 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1718 sess = cls_sess->dd_data;
1719 ddb_entry = sess->dd_data;
1720 ha = ddb_entry->ha;
1721
1722 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1723 &fw_ddb_entry_dma, GFP_KERNEL);
1724 if (!fw_ddb_entry) {
1725 ql4_printk(KERN_ERR, ha,
1726 "%s: Unable to allocate dma buffer\n", __func__);
1727 goto destroy_session;
1728 }
1729
1730 wtime = jiffies + (HZ * LOGOUT_TOV);
1731 do {
1732 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1733 fw_ddb_entry, fw_ddb_entry_dma,
1734 NULL, NULL, &ddb_state, NULL,
1735 NULL, NULL);
1736 if (ret == QLA_ERROR)
1737 goto destroy_session;
1738
1739 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1740 (ddb_state == DDB_DS_SESSION_FAILED))
1741 goto destroy_session;
1742
1743 schedule_timeout_uninterruptible(HZ);
1744 } while ((time_after(wtime, jiffies)));
1745
1746 destroy_session:
1747 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1748
1749 spin_lock_irqsave(&ha->hardware_lock, flags);
1750 qla4xxx_free_ddb(ha, ddb_entry);
1751 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1752
1753 iscsi_session_teardown(cls_sess);
1754
1755 if (fw_ddb_entry)
1756 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1757 fw_ddb_entry, fw_ddb_entry_dma);
1758 }
1759
1760 static struct iscsi_cls_conn *
1761 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1762 {
1763 struct iscsi_cls_conn *cls_conn;
1764 struct iscsi_session *sess;
1765 struct ddb_entry *ddb_entry;
1766
1767 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1768 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1769 conn_idx);
1770 if (!cls_conn)
1771 return NULL;
1772
1773 sess = cls_sess->dd_data;
1774 ddb_entry = sess->dd_data;
1775 ddb_entry->conn = cls_conn;
1776
1777 return cls_conn;
1778 }
1779
1780 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1781 struct iscsi_cls_conn *cls_conn,
1782 uint64_t transport_fd, int is_leading)
1783 {
1784 struct iscsi_conn *conn;
1785 struct qla_conn *qla_conn;
1786 struct iscsi_endpoint *ep;
1787
1788 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1789
1790 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1791 return -EINVAL;
1792 ep = iscsi_lookup_endpoint(transport_fd);
1793 conn = cls_conn->dd_data;
1794 qla_conn = conn->dd_data;
1795 qla_conn->qla_ep = ep->dd_data;
1796 return 0;
1797 }
1798
1799 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1800 {
1801 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1802 struct iscsi_session *sess;
1803 struct ddb_entry *ddb_entry;
1804 struct scsi_qla_host *ha;
1805 struct dev_db_entry *fw_ddb_entry = NULL;
1806 dma_addr_t fw_ddb_entry_dma;
1807 uint32_t mbx_sts = 0;
1808 int ret = 0;
1809 int status = QLA_SUCCESS;
1810
1811 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1812 sess = cls_sess->dd_data;
1813 ddb_entry = sess->dd_data;
1814 ha = ddb_entry->ha;
1815
1816	/* Check if a matching FW DDB already exists; if so, do not log in
1817	 * to this target again, since doing so could cause the target to
1818	 * log out the previous connection.
1819	 */
1820 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1821 if (ret == QLA_SUCCESS) {
1822 ql4_printk(KERN_INFO, ha,
1823			   "Session already exists in FW.\n");
1824 ret = -EEXIST;
1825 goto exit_conn_start;
1826 }
1827
1828 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1829 &fw_ddb_entry_dma, GFP_KERNEL);
1830 if (!fw_ddb_entry) {
1831 ql4_printk(KERN_ERR, ha,
1832 "%s: Unable to allocate dma buffer\n", __func__);
1833 ret = -ENOMEM;
1834 goto exit_conn_start;
1835 }
1836
1837 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1838 if (ret) {
1839		/* If iscsid is stopped and restarted, there is no need to
1840		 * set the params again, since the DDB state will already be
1841		 * active and the FW does not allow set_ddb on an active
1842		 * session.
1843		 */
1844 if (mbx_sts)
1845 if (ddb_entry->fw_ddb_device_state ==
1846 DDB_DS_SESSION_ACTIVE) {
1847 ddb_entry->unblock_sess(ddb_entry->sess);
1848 goto exit_set_param;
1849 }
1850
1851 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1852 __func__, ddb_entry->fw_ddb_index);
1853 goto exit_conn_start;
1854 }
1855
1856 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1857 if (status == QLA_ERROR) {
1858 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1859 sess->targetname);
1860 ret = -EINVAL;
1861 goto exit_conn_start;
1862 }
1863
1864 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1865 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1866
1867 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1868 ddb_entry->fw_ddb_device_state));
1869
1870 exit_set_param:
1871 ret = 0;
1872
1873 exit_conn_start:
1874 if (fw_ddb_entry)
1875 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1876 fw_ddb_entry, fw_ddb_entry_dma);
1877 return ret;
1878 }
1879
1880 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1881 {
1882 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1883 struct iscsi_session *sess;
1884 struct scsi_qla_host *ha;
1885 struct ddb_entry *ddb_entry;
1886 int options;
1887
1888 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1889 sess = cls_sess->dd_data;
1890 ddb_entry = sess->dd_data;
1891 ha = ddb_entry->ha;
1892
1893 options = LOGOUT_OPTION_CLOSE_SESSION;
1894 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1895 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1896 }
1897
1898 static void qla4xxx_task_work(struct work_struct *wdata)
1899 {
1900 struct ql4_task_data *task_data;
1901 struct scsi_qla_host *ha;
1902 struct passthru_status *sts;
1903 struct iscsi_task *task;
1904 struct iscsi_hdr *hdr;
1905 uint8_t *data;
1906 uint32_t data_len;
1907 struct iscsi_conn *conn;
1908 int hdr_len;
1909 itt_t itt;
1910
1911 task_data = container_of(wdata, struct ql4_task_data, task_work);
1912 ha = task_data->ha;
1913 task = task_data->task;
1914 sts = &task_data->sts;
1915 hdr_len = sizeof(struct iscsi_hdr);
1916
1917 DEBUG3(printk(KERN_INFO "Status returned\n"));
1918 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1919 DEBUG3(printk(KERN_INFO "Response buffer"));
1920 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1921
1922 conn = task->conn;
1923
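	/*
	 * On a completed passthru the firmware returns the raw iSCSI
	 * response PDU in resp_buffer: restore the initiator task tag and
	 * hand header + data to libiscsi for normal PDU processing.
	 */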
1924 switch (sts->completionStatus) {
1925 case PASSTHRU_STATUS_COMPLETE:
1926 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1927 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1928 itt = sts->handle;
1929 hdr->itt = itt;
1930 data = task_data->resp_buffer + hdr_len;
1931 data_len = task_data->resp_len - hdr_len;
1932 iscsi_complete_pdu(conn, hdr, data, data_len);
1933 break;
1934 default:
1935 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1936 sts->completionStatus);
1937 break;
1938 }
1939 return;
1940 }
1941
1942 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1943 {
1944 struct ql4_task_data *task_data;
1945 struct iscsi_session *sess;
1946 struct ddb_entry *ddb_entry;
1947 struct scsi_qla_host *ha;
1948 int hdr_len;
1949
1950 sess = task->conn->session;
1951 ddb_entry = sess->dd_data;
1952 ha = ddb_entry->ha;
1953 task_data = task->dd_data;
1954 memset(task_data, 0, sizeof(struct ql4_task_data));
1955
1956 if (task->sc) {
1957 ql4_printk(KERN_INFO, ha,
1958 "%s: SCSI Commands not implemented\n", __func__);
1959 return -EINVAL;
1960 }
1961
1962 hdr_len = sizeof(struct iscsi_hdr);
1963 task_data->ha = ha;
1964 task_data->task = task;
1965
1966 if (task->data_count) {
1967 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1968 task->data_count,
1969 PCI_DMA_TODEVICE);
1970 }
1971
1972 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1973 __func__, task->conn->max_recv_dlength, hdr_len));
1974
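	/*
	 * Both DMA buffers hold a complete iSCSI PDU: the basic header
	 * segment followed by the data segment, hence the "+ hdr_len".
	 */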
1975 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1976 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1977 task_data->resp_len,
1978 &task_data->resp_dma,
1979 GFP_ATOMIC);
1980 if (!task_data->resp_buffer)
1981 goto exit_alloc_pdu;
1982
1983 task_data->req_len = task->data_count + hdr_len;
1984 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1985 task_data->req_len,
1986 &task_data->req_dma,
1987 GFP_ATOMIC);
1988 if (!task_data->req_buffer)
1989 goto exit_alloc_pdu;
1990
1991 task->hdr = task_data->req_buffer;
1992
1993 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1994
1995 return 0;
1996
1997 exit_alloc_pdu:
1998 if (task_data->resp_buffer)
1999 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2000 task_data->resp_buffer, task_data->resp_dma);
2001
2002 if (task_data->req_buffer)
2003 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2004 task_data->req_buffer, task_data->req_dma);
2005 return -ENOMEM;
2006 }
2007
2008 static void qla4xxx_task_cleanup(struct iscsi_task *task)
2009 {
2010 struct ql4_task_data *task_data;
2011 struct iscsi_session *sess;
2012 struct ddb_entry *ddb_entry;
2013 struct scsi_qla_host *ha;
2014 int hdr_len;
2015
2016 hdr_len = sizeof(struct iscsi_hdr);
2017 sess = task->conn->session;
2018 ddb_entry = sess->dd_data;
2019 ha = ddb_entry->ha;
2020 task_data = task->dd_data;
2021
2022 if (task->data_count) {
2023 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
2024 task->data_count, PCI_DMA_TODEVICE);
2025 }
2026
2027 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
2028 __func__, task->conn->max_recv_dlength, hdr_len));
2029
2030 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2031 task_data->resp_buffer, task_data->resp_dma);
2032 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2033 task_data->req_buffer, task_data->req_dma);
2034 return;
2035 }
2036
2037 static int qla4xxx_task_xmit(struct iscsi_task *task)
2038 {
2039 struct scsi_cmnd *sc = task->sc;
2040 struct iscsi_session *sess = task->conn->session;
2041 struct ddb_entry *ddb_entry = sess->dd_data;
2042 struct scsi_qla_host *ha = ddb_entry->ha;
2043
2044 if (!sc)
2045 return qla4xxx_send_passthru0(task);
2046
2047 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
2048 __func__);
2049 return -ENOSYS;
2050 }
2051
2052 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
2053 struct iscsi_bus_flash_conn *conn,
2054 struct dev_db_entry *fw_ddb_entry)
2055 {
2056 unsigned long options = 0;
2057 int rc = 0;
2058
2059 options = le16_to_cpu(fw_ddb_entry->options);
2060 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2061 if (test_bit(OPT_IPV6_DEVICE, &options)) {
2062 rc = iscsi_switch_str_param(&sess->portal_type,
2063 PORTAL_TYPE_IPV6);
2064 if (rc)
2065 goto exit_copy;
2066 } else {
2067 rc = iscsi_switch_str_param(&sess->portal_type,
2068 PORTAL_TYPE_IPV4);
2069 if (rc)
2070 goto exit_copy;
2071 }
2072
2073 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2074 &options);
2075 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2076 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
2077
2078 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2079 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2080 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2081 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2082 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2083 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2084 &options);
2085 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2086 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2087 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
2088 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2089 &options);
2090 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2091 sess->discovery_auth_optional =
2092 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2093 if (test_bit(ISCSIOPT_ERL1, &options))
2094 sess->erl |= BIT_1;
2095 if (test_bit(ISCSIOPT_ERL0, &options))
2096 sess->erl |= BIT_0;
2097
2098 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2099 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2100 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2101 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2102 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2103 conn->tcp_timer_scale |= BIT_3;
2104 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2105 conn->tcp_timer_scale |= BIT_2;
2106 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2107 conn->tcp_timer_scale |= BIT_1;
2108
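	/*
	 * The timer-scale flags were gathered into BIT_1..BIT_3 above; one
	 * right shift leaves the plain 3-bit value in tcp_timer_scale
	 * (e.g. BIT_3 | BIT_1 = 0b1010 becomes 0b101 = 5).
	 */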
2109 conn->tcp_timer_scale >>= 1;
2110 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2111
2112 options = le16_to_cpu(fw_ddb_entry->ip_options);
2113 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2114
2115 conn->max_recv_dlength = BYTE_UNITS *
2116 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2117 conn->max_xmit_dlength = BYTE_UNITS *
2118 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2119 sess->first_burst = BYTE_UNITS *
2120 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2121 sess->max_burst = BYTE_UNITS *
2122 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2123 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2124 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2125 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2126 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2127 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2128 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2129 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2130 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
2131 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
2132 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2133 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2134 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2135 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
2136 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
2137 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2138 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2139
2140 sess->default_taskmgmt_timeout =
2141 le16_to_cpu(fw_ddb_entry->def_timeout);
2142 conn->port = le16_to_cpu(fw_ddb_entry->port);
2143
2144 options = le16_to_cpu(fw_ddb_entry->options);
2145 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2146 if (!conn->ipaddress) {
2147 rc = -ENOMEM;
2148 goto exit_copy;
2149 }
2150
2151 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2152 if (!conn->redirect_ipaddr) {
2153 rc = -ENOMEM;
2154 goto exit_copy;
2155 }
2156
2157 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
2158 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
2159
2160 if (test_bit(OPT_IPV6_DEVICE, &options)) {
2161 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
2162
2163 conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2164 if (!conn->link_local_ipv6_addr) {
2165 rc = -ENOMEM;
2166 goto exit_copy;
2167 }
2168
2169 memcpy(conn->link_local_ipv6_addr,
2170 fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
2171 } else {
2172 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2173 }
2174
2175 if (fw_ddb_entry->iscsi_name[0]) {
2176 rc = iscsi_switch_str_param(&sess->targetname,
2177 (char *)fw_ddb_entry->iscsi_name);
2178 if (rc)
2179 goto exit_copy;
2180 }
2181
2182 if (fw_ddb_entry->iscsi_alias[0]) {
2183 rc = iscsi_switch_str_param(&sess->targetalias,
2184 (char *)fw_ddb_entry->iscsi_alias);
2185 if (rc)
2186 goto exit_copy;
2187 }
2188
2189 COPY_ISID(sess->isid, fw_ddb_entry->isid);
2190
2191 exit_copy:
2192 return rc;
2193 }
2194
2195 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2196 struct iscsi_bus_flash_conn *conn,
2197 struct dev_db_entry *fw_ddb_entry)
2198 {
2199 uint16_t options;
2200 int rc = 0;
2201
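	/*
	 * Mirror of qla4xxx_copy_from_fwddb_param(): SET_BITVAL() sets or
	 * clears the given bit in 'options' depending on whether the
	 * corresponding session/connection attribute is non-zero, and the
	 * packed word is written back to the firmware DDB as little-endian.
	 */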
2202 options = le16_to_cpu(fw_ddb_entry->options);
2203 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
2204 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2205 options |= BIT_8;
2206 else
2207 options &= ~BIT_8;
2208
2209 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
2210 SET_BITVAL(sess->discovery_sess, options, BIT_4);
2211 SET_BITVAL(sess->entry_state, options, BIT_3);
2212 fw_ddb_entry->options = cpu_to_le16(options);
2213
2214 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2215 SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
2216 SET_BITVAL(conn->datadgst_en, options, BIT_12);
2217 SET_BITVAL(sess->imm_data_en, options, BIT_11);
2218 SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
2219 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
2220 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
2221 SET_BITVAL(sess->chap_auth_en, options, BIT_7);
2222 SET_BITVAL(conn->snack_req_en, options, BIT_6);
2223 SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
2224 SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
2225 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
2226 SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
2227 SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
2228 fw_ddb_entry->iscsi_options = cpu_to_le16(options);
2229
2230 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2231 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
2232 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
2233 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
2234 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
2235 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
2236 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
2237 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
2238 fw_ddb_entry->tcp_options = cpu_to_le16(options);
2239
2240 options = le16_to_cpu(fw_ddb_entry->ip_options);
2241 SET_BITVAL(conn->fragment_disable, options, BIT_4);
2242 fw_ddb_entry->ip_options = cpu_to_le16(options);
2243
2244 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
2245 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
2246 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
2247 fw_ddb_entry->iscsi_max_snd_data_seg_len =
2248 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
2249 fw_ddb_entry->iscsi_first_burst_len =
2250 cpu_to_le16(sess->first_burst / BYTE_UNITS);
2251 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
2252 BYTE_UNITS);
2253 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
2254 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2255 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2256 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2257 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2258 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2259 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2260 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2261 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2262 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2263 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2264 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
2265 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2266 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2267 fw_ddb_entry->port = cpu_to_le16(conn->port);
2268 fw_ddb_entry->def_timeout =
2269 cpu_to_le16(sess->default_taskmgmt_timeout);
2270
2271 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2272 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
2273 else
2274 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2275
2276 if (conn->ipaddress)
2277 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2278 sizeof(fw_ddb_entry->ip_addr));
2279
2280 if (conn->redirect_ipaddr)
2281 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
2282 sizeof(fw_ddb_entry->tgt_addr));
2283
2284 if (conn->link_local_ipv6_addr)
2285 memcpy(fw_ddb_entry->link_local_ipv6_addr,
2286 conn->link_local_ipv6_addr,
2287 sizeof(fw_ddb_entry->link_local_ipv6_addr));
2288
2289 if (sess->targetname)
2290 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
2291 sizeof(fw_ddb_entry->iscsi_name));
2292
2293 if (sess->targetalias)
2294 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
2295 sizeof(fw_ddb_entry->iscsi_alias));
2296
2297 COPY_ISID(fw_ddb_entry->isid, sess->isid);
2298
2299 return rc;
2300 }
2301
2302 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
2303 struct iscsi_session *sess,
2304 struct dev_db_entry *fw_ddb_entry)
2305 {
2306 unsigned long options = 0;
2307 uint16_t ddb_link;
2308 uint16_t disc_parent;
2309
2310 options = le16_to_cpu(fw_ddb_entry->options);
2311 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2312 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2313 &options);
2314 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2315
2316 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2317 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2318 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2319 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2320 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2321 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2322 &options);
2323 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2324 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2325 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2326 &options);
2327 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2328 sess->discovery_auth_optional =
2329 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2330 if (test_bit(ISCSIOPT_ERL1, &options))
2331 sess->erl |= BIT_1;
2332 if (test_bit(ISCSIOPT_ERL0, &options))
2333 sess->erl |= BIT_0;
2334
2335 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2336 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2337 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2338 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2339 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2340 conn->tcp_timer_scale |= BIT_3;
2341 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2342 conn->tcp_timer_scale |= BIT_2;
2343 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2344 conn->tcp_timer_scale |= BIT_1;
2345
2346 conn->tcp_timer_scale >>= 1;
2347 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2348
2349 options = le16_to_cpu(fw_ddb_entry->ip_options);
2350 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2351
2352 conn->max_recv_dlength = BYTE_UNITS *
2353 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2354 conn->max_xmit_dlength = BYTE_UNITS *
2355 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2356 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2357 sess->first_burst = BYTE_UNITS *
2358 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2359 sess->max_burst = BYTE_UNITS *
2360 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2361 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2362 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2363 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2364 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2365 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2366 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2367 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2368 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
2369 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2370 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2371 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2372 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2373 COPY_ISID(sess->isid, fw_ddb_entry->isid);
2374
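	/*
	 * Map the firmware ddb_link field to a discovery parent type: the
	 * reserved DDB_ISNS and DDB_NO_LINK values have fixed meanings,
	 * while any other index below MAX_DDB_ENTRIES refers to the
	 * SendTargets parent DDB.
	 */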
2375 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
2376 if (ddb_link == DDB_ISNS)
2377 disc_parent = ISCSI_DISC_PARENT_ISNS;
2378 else if (ddb_link == DDB_NO_LINK)
2379 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2380 else if (ddb_link < MAX_DDB_ENTRIES)
2381 disc_parent = ISCSI_DISC_PARENT_SENDTGT;
2382 else
2383 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2384
2385 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
2386 iscsi_get_discovery_parent_name(disc_parent), 0);
2387
2388 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2389 (char *)fw_ddb_entry->iscsi_alias, 0);
2390 }
2391
2392 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2393 struct dev_db_entry *fw_ddb_entry,
2394 struct iscsi_cls_session *cls_sess,
2395 struct iscsi_cls_conn *cls_conn)
2396 {
2397 int buflen = 0;
2398 struct iscsi_session *sess;
2399 struct ddb_entry *ddb_entry;
2400 struct iscsi_conn *conn;
2401 char ip_addr[DDB_IPADDR_LEN];
2402 uint16_t options = 0;
2403
2404 sess = cls_sess->dd_data;
2405 ddb_entry = sess->dd_data;
2406 conn = cls_conn->dd_data;
2407
2408 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2409
2410 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2411
2412 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
2413 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
2414
2415 memset(ip_addr, 0, sizeof(ip_addr));
2416 options = le16_to_cpu(fw_ddb_entry->options);
2417 if (options & DDB_OPT_IPV6_DEVICE) {
2418 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
2419
2420 memset(ip_addr, 0, sizeof(ip_addr));
2421 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
2422 } else {
2423 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
2424 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
2425 }
2426
2427 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2428 (char *)ip_addr, buflen);
2429 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
2430 (char *)fw_ddb_entry->iscsi_name, buflen);
2431 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
2432 (char *)ha->name_string, buflen);
2433 }
2434
2435 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
2436 struct ddb_entry *ddb_entry)
2437 {
2438 struct iscsi_cls_session *cls_sess;
2439 struct iscsi_cls_conn *cls_conn;
2440 uint32_t ddb_state;
2441 dma_addr_t fw_ddb_entry_dma;
2442 struct dev_db_entry *fw_ddb_entry;
2443
2444 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2445 &fw_ddb_entry_dma, GFP_KERNEL);
2446 if (!fw_ddb_entry) {
2447 ql4_printk(KERN_ERR, ha,
2448 "%s: Unable to allocate dma buffer\n", __func__);
2449 goto exit_session_conn_fwddb_param;
2450 }
2451
2452 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2453 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2454 NULL, NULL, NULL) == QLA_ERROR) {
2455 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2456 "get_ddb_entry for fw_ddb_index %d\n",
2457 ha->host_no, __func__,
2458 ddb_entry->fw_ddb_index));
2459 goto exit_session_conn_fwddb_param;
2460 }
2461
2462 cls_sess = ddb_entry->sess;
2463
2464 cls_conn = ddb_entry->conn;
2465
2466 /* Update params */
2467 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2468
2469 exit_session_conn_fwddb_param:
2470 if (fw_ddb_entry)
2471 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2472 fw_ddb_entry, fw_ddb_entry_dma);
2473 }
2474
2475 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2476 struct ddb_entry *ddb_entry)
2477 {
2478 struct iscsi_cls_session *cls_sess;
2479 struct iscsi_cls_conn *cls_conn;
2480 struct iscsi_session *sess;
2481 struct iscsi_conn *conn;
2482 uint32_t ddb_state;
2483 dma_addr_t fw_ddb_entry_dma;
2484 struct dev_db_entry *fw_ddb_entry;
2485
2486 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2487 &fw_ddb_entry_dma, GFP_KERNEL);
2488 if (!fw_ddb_entry) {
2489 ql4_printk(KERN_ERR, ha,
2490 "%s: Unable to allocate dma buffer\n", __func__);
2491 goto exit_session_conn_param;
2492 }
2493
2494 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2495 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2496 NULL, NULL, NULL) == QLA_ERROR) {
2497 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2498 "get_ddb_entry for fw_ddb_index %d\n",
2499 ha->host_no, __func__,
2500 ddb_entry->fw_ddb_index));
2501 goto exit_session_conn_param;
2502 }
2503
2504 cls_sess = ddb_entry->sess;
2505 sess = cls_sess->dd_data;
2506
2507 cls_conn = ddb_entry->conn;
2508 conn = cls_conn->dd_data;
2509
2510 /* Update timers after login */
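	/*
	 * Use the firmware's def_timeout as the relogin timeout only when it
	 * lies strictly between LOGIN_TOV and LOGIN_TOV * 10; otherwise fall
	 * back to LOGIN_TOV.
	 */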
2511 ddb_entry->default_relogin_timeout =
2512 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2513 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2514 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2515 ddb_entry->default_time2wait =
2516 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2517
2518 /* Update params */
2519 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2520 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2521
2522 memcpy(sess->initiatorname, ha->name_string,
2523 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2524
2525 exit_session_conn_param:
2526 if (fw_ddb_entry)
2527 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2528 fw_ddb_entry, fw_ddb_entry_dma);
2529 }
2530
2531 /*
2532 * Timer routines
2533 */
2534
2535 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2536 unsigned long interval)
2537 {
2538 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2539 __func__, ha->host->host_no));
2540 init_timer(&ha->timer);
2541 ha->timer.expires = jiffies + interval * HZ;
2542 ha->timer.data = (unsigned long)ha;
2543 ha->timer.function = (void (*)(unsigned long))func;
2544 add_timer(&ha->timer);
2545 ha->timer_active = 1;
2546 }
2547
2548 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2549 {
2550 del_timer_sync(&ha->timer);
2551 ha->timer_active = 0;
2552 }
2553
2554 /**
2555  * qla4xxx_mark_device_missing - blocks the session
2556  * @cls_session: Pointer to the session to be blocked
2557  *
2558  * This routine marks a device missing and closes its
2559  * connection.
2560  **/
2561 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2562 {
2563 iscsi_block_session(cls_session);
2564 }
2565
2566 /**
2567 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2568 * @ha: Pointer to host adapter structure.
2569 *
2570  * This routine marks all devices on the adapter as missing by blocking their sessions.
2571 **/
2572 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2573 {
2574 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2575 }
2576
2577 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2578 struct ddb_entry *ddb_entry,
2579 struct scsi_cmnd *cmd)
2580 {
2581 struct srb *srb;
2582
2583 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2584 if (!srb)
2585 return srb;
2586
2587 kref_init(&srb->srb_ref);
2588 srb->ha = ha;
2589 srb->ddb = ddb_entry;
2590 srb->cmd = cmd;
2591 srb->flags = 0;
2592 CMD_SP(cmd) = (void *)srb;
2593
2594 return srb;
2595 }
2596
2597 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2598 {
2599 struct scsi_cmnd *cmd = srb->cmd;
2600
2601 if (srb->flags & SRB_DMA_VALID) {
2602 scsi_dma_unmap(cmd);
2603 srb->flags &= ~SRB_DMA_VALID;
2604 }
2605 CMD_SP(cmd) = NULL;
2606 }
2607
2608 void qla4xxx_srb_compl(struct kref *ref)
2609 {
2610 struct srb *srb = container_of(ref, struct srb, srb_ref);
2611 struct scsi_cmnd *cmd = srb->cmd;
2612 struct scsi_qla_host *ha = srb->ha;
2613
2614 qla4xxx_srb_free_dma(ha, srb);
2615
2616 mempool_free(srb, ha->srb_mempool);
2617
2618 cmd->scsi_done(cmd);
2619 }
2620
2621 /**
2622 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2623 * @host: scsi host
2624 * @cmd: Pointer to Linux's SCSI command structure
2625 *
2626 * Remarks:
2627 * This routine is invoked by Linux to send a SCSI command to the driver.
2628 * The mid-level driver tries to ensure that queuecommand never gets
2629 * invoked concurrently with itself or the interrupt handler (although
2630 * the interrupt handler may call this routine as part of request-
2631  * completion handling).  Unfortunately, it sometimes calls the scheduler
2632  * in interrupt context, which is a big NO! NO!
2633 **/
2634 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2635 {
2636 struct scsi_qla_host *ha = to_qla_host(host);
2637 struct ddb_entry *ddb_entry = cmd->device->hostdata;
2638 struct iscsi_cls_session *sess = ddb_entry->sess;
2639 struct srb *srb;
2640 int rval;
2641
2642 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2643 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2644 cmd->result = DID_NO_CONNECT << 16;
2645 else
2646 cmd->result = DID_REQUEUE << 16;
2647 goto qc_fail_command;
2648 }
2649
2650 if (!sess) {
2651 cmd->result = DID_IMM_RETRY << 16;
2652 goto qc_fail_command;
2653 }
2654
2655 rval = iscsi_session_chkready(sess);
2656 if (rval) {
2657 cmd->result = rval;
2658 goto qc_fail_command;
2659 }
2660
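	/*
	 * Report the host as busy (so the midlayer retries later) while any
	 * reset, quiesce, loopback or link-down condition is in progress.
	 */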
2661 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2662 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2663 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2664 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2665 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2666 !test_bit(AF_ONLINE, &ha->flags) ||
2667 !test_bit(AF_LINK_UP, &ha->flags) ||
2668 test_bit(AF_LOOPBACK, &ha->flags) ||
2669 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
2670 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
2671 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2672 goto qc_host_busy;
2673
2674 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2675 if (!srb)
2676 goto qc_host_busy;
2677
2678 rval = qla4xxx_send_command_to_isp(ha, srb);
2679 if (rval != QLA_SUCCESS)
2680 goto qc_host_busy_free_sp;
2681
2682 return 0;
2683
2684 qc_host_busy_free_sp:
2685 qla4xxx_srb_free_dma(ha, srb);
2686 mempool_free(srb, ha->srb_mempool);
2687
2688 qc_host_busy:
2689 return SCSI_MLQUEUE_HOST_BUSY;
2690
2691 qc_fail_command:
2692 cmd->scsi_done(cmd);
2693
2694 return 0;
2695 }
2696
2697 /**
2698 * qla4xxx_mem_free - frees memory allocated to adapter
2699 * @ha: Pointer to host adapter structure.
2700 *
2701 * Frees memory previously allocated by qla4xxx_mem_alloc
2702 **/
2703 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2704 {
2705 if (ha->queues)
2706 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2707 ha->queues_dma);
2708
2709 if (ha->fw_dump)
2710 vfree(ha->fw_dump);
2711
2712 ha->queues_len = 0;
2713 ha->queues = NULL;
2714 ha->queues_dma = 0;
2715 ha->request_ring = NULL;
2716 ha->request_dma = 0;
2717 ha->response_ring = NULL;
2718 ha->response_dma = 0;
2719 ha->shadow_regs = NULL;
2720 ha->shadow_regs_dma = 0;
2721 ha->fw_dump = NULL;
2722 ha->fw_dump_size = 0;
2723
2724 /* Free srb pool. */
2725 if (ha->srb_mempool)
2726 mempool_destroy(ha->srb_mempool);
2727
2728 ha->srb_mempool = NULL;
2729
2730 if (ha->chap_dma_pool)
2731 dma_pool_destroy(ha->chap_dma_pool);
2732
2733 if (ha->chap_list)
2734 vfree(ha->chap_list);
2735 ha->chap_list = NULL;
2736
2737 if (ha->fw_ddb_dma_pool)
2738 dma_pool_destroy(ha->fw_ddb_dma_pool);
2739
2740 /* release io space registers */
2741 if (is_qla8022(ha)) {
2742 if (ha->nx_pcibase)
2743 iounmap(
2744 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2745 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2746 if (ha->nx_pcibase)
2747 iounmap(
2748 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2749 } else if (ha->reg) {
2750 iounmap(ha->reg);
2751 }
2752
2753 if (ha->reset_tmplt.buff)
2754 vfree(ha->reset_tmplt.buff);
2755
2756 pci_release_regions(ha->pdev);
2757 }
2758
2759 /**
2760 * qla4xxx_mem_alloc - allocates memory for use by adapter.
2761 * @ha: Pointer to host adapter structure
2762 *
2763 * Allocates DMA memory for request and response queues. Also allocates memory
2764 * for srbs.
2765 **/
2766 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2767 {
2768 unsigned long align;
2769
2770 /* Allocate contiguous block of DMA memory for queues. */
2771 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2772 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2773 sizeof(struct shadow_regs) +
2774 MEM_ALIGN_VALUE +
2775 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2776 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2777 &ha->queues_dma, GFP_KERNEL);
2778 if (ha->queues == NULL) {
2779 ql4_printk(KERN_WARNING, ha,
2780 "Memory Allocation failed - queues.\n");
2781
2782 goto mem_alloc_error_exit;
2783 }
2784 memset(ha->queues, 0, ha->queues_len);
2785
2786 /*
2787 * As per RISC alignment requirements -- the bus-address must be a
2788 * multiple of the request-ring size (in bytes).
2789 */
2790 align = 0;
2791 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2792 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2793 (MEM_ALIGN_VALUE - 1));
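	/*
	 * For example, if MEM_ALIGN_VALUE were 0x1000 and queues_dma ended
	 * in 0x040, align would be 0x1000 - 0x040 = 0xfc0, placing
	 * request_dma on the next 0x1000-byte boundary.
	 */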
2794
2795 /* Update request and response queue pointers. */
2796 ha->request_dma = ha->queues_dma + align;
2797 ha->request_ring = (struct queue_entry *) (ha->queues + align);
2798 ha->response_dma = ha->queues_dma + align +
2799 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2800 ha->response_ring = (struct queue_entry *) (ha->queues + align +
2801 (REQUEST_QUEUE_DEPTH *
2802 QUEUE_SIZE));
2803 ha->shadow_regs_dma = ha->queues_dma + align +
2804 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2805 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2806 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2807 (REQUEST_QUEUE_DEPTH *
2808 QUEUE_SIZE) +
2809 (RESPONSE_QUEUE_DEPTH *
2810 QUEUE_SIZE));
2811
2812 /* Allocate memory for srb pool. */
2813 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2814 mempool_free_slab, srb_cachep);
2815 if (ha->srb_mempool == NULL) {
2816 ql4_printk(KERN_WARNING, ha,
2817 "Memory Allocation failed - SRB Pool.\n");
2818
2819 goto mem_alloc_error_exit;
2820 }
2821
2822 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2823 CHAP_DMA_BLOCK_SIZE, 8, 0);
2824
2825 if (ha->chap_dma_pool == NULL) {
2826 ql4_printk(KERN_WARNING, ha,
2827 "%s: chap_dma_pool allocation failed..\n", __func__);
2828 goto mem_alloc_error_exit;
2829 }
2830
2831 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2832 DDB_DMA_BLOCK_SIZE, 8, 0);
2833
2834 if (ha->fw_ddb_dma_pool == NULL) {
2835 ql4_printk(KERN_WARNING, ha,
2836 "%s: fw_ddb_dma_pool allocation failed..\n",
2837 __func__);
2838 goto mem_alloc_error_exit;
2839 }
2840
2841 return QLA_SUCCESS;
2842
2843 mem_alloc_error_exit:
2844 qla4xxx_mem_free(ha);
2845 return QLA_ERROR;
2846 }
2847
2848 /**
2849 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2850 * @ha: adapter block pointer.
2851 *
2852 * Note: The caller should not hold the idc lock.
2853 **/
2854 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2855 {
2856 uint32_t temp, temp_state, temp_val;
2857 int status = QLA_SUCCESS;
2858
2859 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2860
2861 temp_state = qla82xx_get_temp_state(temp);
2862 temp_val = qla82xx_get_temp_val(temp);
2863
2864 if (temp_state == QLA82XX_TEMP_PANIC) {
2865 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2866 " exceeds maximum allowed. Hardware has been shut"
2867 " down.\n", temp_val);
2868 status = QLA_ERROR;
2869 } else if (temp_state == QLA82XX_TEMP_WARN) {
2870 if (ha->temperature == QLA82XX_TEMP_NORMAL)
2871 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2872 " degrees C exceeds operating range."
2873 " Immediate action needed.\n", temp_val);
2874 } else {
2875 if (ha->temperature == QLA82XX_TEMP_WARN)
2876 ql4_printk(KERN_INFO, ha, "Device temperature is"
2877 " now %d degrees C in normal range.\n",
2878 temp_val);
2879 }
2880 ha->temperature = temp_state;
2881 return status;
2882 }
2883
2884 /**
2885 * qla4_8xxx_check_fw_alive - Check firmware health
2886 * @ha: Pointer to host adapter structure.
2887 *
2888 * Context: Interrupt
2889 **/
2890 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2891 {
2892 uint32_t fw_heartbeat_counter;
2893 int status = QLA_SUCCESS;
2894
2895 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2896 QLA8XXX_PEG_ALIVE_COUNTER);
2897 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2898 if (fw_heartbeat_counter == 0xffffffff) {
2899 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2900 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2901 ha->host_no, __func__));
2902 return status;
2903 }
2904
2905 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2906 ha->seconds_since_last_heartbeat++;
2907 /* FW not alive after 2 seconds */
2908 if (ha->seconds_since_last_heartbeat == 2) {
2909 ha->seconds_since_last_heartbeat = 0;
2910 qla4_8xxx_dump_peg_reg(ha);
2911 status = QLA_ERROR;
2912 }
2913 } else
2914 ha->seconds_since_last_heartbeat = 0;
2915
2916 ha->fw_heartbeat_counter = fw_heartbeat_counter;
2917 return status;
2918 }
2919
2920 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2921 {
2922 uint32_t halt_status;
2923 int halt_status_unrecoverable = 0;
2924
2925 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
2926
2927 if (is_qla8022(ha)) {
2928 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2929 __func__);
2930 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2931 CRB_NIU_XG_PAUSE_CTL_P0 |
2932 CRB_NIU_XG_PAUSE_CTL_P1);
2933
2934 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2935 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
2936 __func__);
2937 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2938 halt_status_unrecoverable = 1;
2939 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2940 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2941 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
2942 __func__);
2943 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
2944 halt_status_unrecoverable = 1;
2945 }
2946
2947 /*
2948 * Since we cannot change dev_state in interrupt context,
2949	 * set the appropriate DPC flag and then wake up the DPC.
2950 */
2951 if (halt_status_unrecoverable) {
2952 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2953 } else {
2954 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
2955 __func__);
2956 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2957 }
2958 qla4xxx_mailbox_premature_completion(ha);
2959 qla4xxx_wake_dpc(ha);
2960 }
2961
2962 /**
2963 * qla4_8xxx_watchdog - Poll dev state
2964 * @ha: Pointer to host adapter structure.
2965 *
2966 * Context: Interrupt
2967 **/
2968 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2969 {
2970 uint32_t dev_state;
2971 uint32_t idc_ctrl;
2972
2973 /* don't poll if reset is going on */
2974 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2975 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2976 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2977 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2978
2979 if (qla4_8xxx_check_temp(ha)) {
2980 if (is_qla8022(ha)) {
2981 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
2982 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2983 CRB_NIU_XG_PAUSE_CTL_P0 |
2984 CRB_NIU_XG_PAUSE_CTL_P1);
2985 }
2986 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2987 qla4xxx_wake_dpc(ha);
2988 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2989 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2990
2991 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
2992 __func__);
2993
2994 if (is_qla8032(ha) || is_qla8042(ha)) {
2995 idc_ctrl = qla4_83xx_rd_reg(ha,
2996 QLA83XX_IDC_DRV_CTRL);
2997 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
2998 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
2999 __func__);
3000 qla4xxx_mailbox_premature_completion(
3001 ha);
3002 }
3003 }
3004
3005 if ((is_qla8032(ha) || is_qla8042(ha)) ||
3006 (is_qla8022(ha) && !ql4xdontresethba)) {
3007 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3008 qla4xxx_wake_dpc(ha);
3009 }
3010 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3011 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3012 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
3013 __func__);
3014 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
3015 qla4xxx_wake_dpc(ha);
3016 } else {
3017 /* Check firmware health */
3018 if (qla4_8xxx_check_fw_alive(ha))
3019 qla4_8xxx_process_fw_error(ha);
3020 }
3021 }
3022 }
3023
3024 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3025 {
3026 struct iscsi_session *sess;
3027 struct ddb_entry *ddb_entry;
3028 struct scsi_qla_host *ha;
3029
3030 sess = cls_sess->dd_data;
3031 ddb_entry = sess->dd_data;
3032 ha = ddb_entry->ha;
3033
3034 if (!(ddb_entry->ddb_type == FLASH_DDB))
3035 return;
3036
3037 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
3038 !iscsi_is_session_online(cls_sess)) {
3039 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
3040 INVALID_ENTRY) {
3041 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
3042 0) {
3043 atomic_set(&ddb_entry->retry_relogin_timer,
3044 INVALID_ENTRY);
3045 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3046 set_bit(DF_RELOGIN, &ddb_entry->flags);
3047 DEBUG2(ql4_printk(KERN_INFO, ha,
3048 "%s: index [%d] login device\n",
3049 __func__, ddb_entry->fw_ddb_index));
3050 } else
3051 atomic_dec(&ddb_entry->retry_relogin_timer);
3052 }
3053 }
3054
3055	/* Wait for the relogin to time out */
3056 if (atomic_read(&ddb_entry->relogin_timer) &&
3057 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
3058 /*
3059 * If the relogin times out and the device is
3060 * still NOT ONLINE then try and relogin again.
3061 */
3062 if (!iscsi_is_session_online(cls_sess)) {
3063 /* Reset retry relogin timer */
3064 atomic_inc(&ddb_entry->relogin_retry_count);
3065 DEBUG2(ql4_printk(KERN_INFO, ha,
3066 "%s: index[%d] relogin timed out-retrying"
3067 " relogin (%d), retry (%d)\n", __func__,
3068 ddb_entry->fw_ddb_index,
3069 atomic_read(&ddb_entry->relogin_retry_count),
3070 ddb_entry->default_time2wait + 4));
3071 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3072 atomic_set(&ddb_entry->retry_relogin_timer,
3073 ddb_entry->default_time2wait + 4);
3074 }
3075 }
3076 }
3077
3078 /**
3079 * qla4xxx_timer - checks every second for work to do.
3080 * @ha: Pointer to host adapter structure.
3081 **/
3082 static void qla4xxx_timer(struct scsi_qla_host *ha)
3083 {
3084 int start_dpc = 0;
3085 uint16_t w;
3086
3087 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
3088
3089	/* If we are in the middle of AER/EEH recovery, skip any
3090	 * further processing and just reschedule the timer.
3091	 */
3092 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3093 mod_timer(&ha->timer, jiffies + HZ);
3094 return;
3095 }
3096
3097 /* Hardware read to trigger an EEH error during mailbox waits. */
3098 if (!pci_channel_offline(ha->pdev))
3099 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3100
3101 if (is_qla80XX(ha))
3102 qla4_8xxx_watchdog(ha);
3103
3104 if (is_qla40XX(ha)) {
3105 /* Check for heartbeat interval. */
3106 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
3107 ha->heartbeat_interval != 0) {
3108 ha->seconds_since_last_heartbeat++;
3109 if (ha->seconds_since_last_heartbeat >
3110 ha->heartbeat_interval + 2)
3111 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3112 }
3113 }
3114
3115 /* Process any deferred work. */
3116 if (!list_empty(&ha->work_list))
3117 start_dpc++;
3118
3119 /* Wakeup the dpc routine for this adapter, if needed. */
3120 if (start_dpc ||
3121 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3122 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
3123 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
3124 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3125 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3126 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
3127 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
3128 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
3129 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
3130 test_bit(DPC_AEN, &ha->dpc_flags)) {
3131 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
3132 " - dpc flags = 0x%lx\n",
3133 ha->host_no, __func__, ha->dpc_flags));
3134 qla4xxx_wake_dpc(ha);
3135 }
3136
3137 /* Reschedule timer thread to call us back in one second */
3138 mod_timer(&ha->timer, jiffies + HZ);
3139
3140 DEBUG2(ha->seconds_since_last_intr++);
3141 }
3142
3143 /**
3144 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
3145 * @ha: Pointer to host adapter structure.
3146 *
3147 * This routine stalls the driver until all outstanding commands are returned.
3148 * Caller must release the Hardware Lock prior to calling this routine.
3149 **/
3150 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
3151 {
3152 uint32_t index = 0;
3153 unsigned long flags;
3154 struct scsi_cmnd *cmd;
3155
3156 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
3157
3158 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
3159 "complete\n", WAIT_CMD_TOV));
3160
3161 while (!time_after_eq(jiffies, wtime)) {
3162 spin_lock_irqsave(&ha->hardware_lock, flags);
3163 /* Find a command that hasn't completed. */
3164 for (index = 0; index < ha->host->can_queue; index++) {
3165 cmd = scsi_host_find_tag(ha->host, index);
3166 /*
3167 * We cannot just check if the index is valid,
3168 * becase if we are run from the scsi eh, then
3169 * the scsi/block layer is going to prevent
3170 * the tag from being released.
3171 */
3172 if (cmd != NULL && CMD_SP(cmd))
3173 break;
3174 }
3175 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3176
3177 /* If No Commands are pending, wait is complete */
3178 if (index == ha->host->can_queue)
3179 return QLA_SUCCESS;
3180
3181 msleep(1000);
3182 }
3183	/* If we timed out waiting for commands to come back,
3184	 * return ERROR. */
3185 return QLA_ERROR;
3186 }
3187
3188 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
3189 {
3190 uint32_t ctrl_status;
3191 unsigned long flags = 0;
3192
3193 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
3194
3195 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
3196 return QLA_ERROR;
3197
3198 spin_lock_irqsave(&ha->hardware_lock, flags);
3199
3200 /*
3201 * If the SCSI Reset Interrupt bit is set, clear it.
3202 * Otherwise, the Soft Reset won't work.
3203 */
3204 ctrl_status = readw(&ha->reg->ctrl_status);
3205 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
3206 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3207
3208 /* Issue Soft Reset */
3209 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
3210 readl(&ha->reg->ctrl_status);
3211
3212 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3213 return QLA_SUCCESS;
3214 }
3215
3216 /**
3217 * qla4xxx_soft_reset - performs soft reset.
3218 * @ha: Pointer to host adapter structure.
3219 **/
3220 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
3221 {
3222 uint32_t max_wait_time;
3223 unsigned long flags = 0;
3224 int status;
3225 uint32_t ctrl_status;
3226
3227 status = qla4xxx_hw_reset(ha);
3228 if (status != QLA_SUCCESS)
3229 return status;
3230
3231 status = QLA_ERROR;
3232 /* Wait until the Network Reset Intr bit is cleared */
3233 max_wait_time = RESET_INTR_TOV;
3234 do {
3235 spin_lock_irqsave(&ha->hardware_lock, flags);
3236 ctrl_status = readw(&ha->reg->ctrl_status);
3237 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3238
3239 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
3240 break;
3241
3242 msleep(1000);
3243 } while ((--max_wait_time));
3244
3245 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
3246 DEBUG2(printk(KERN_WARNING
3247 "scsi%ld: Network Reset Intr not cleared by "
3248 "Network function, clearing it now!\n",
3249 ha->host_no));
3250 spin_lock_irqsave(&ha->hardware_lock, flags);
3251 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
3252 readl(&ha->reg->ctrl_status);
3253 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3254 }
3255
3256 /* Wait until the firmware tells us the Soft Reset is done */
3257 max_wait_time = SOFT_RESET_TOV;
3258 do {
3259 spin_lock_irqsave(&ha->hardware_lock, flags);
3260 ctrl_status = readw(&ha->reg->ctrl_status);
3261 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3262
3263 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
3264 status = QLA_SUCCESS;
3265 break;
3266 }
3267
3268 msleep(1000);
3269 } while ((--max_wait_time));
3270
3271 /*
3272 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
3273 * after the soft reset has taken place.
3274 */
3275 spin_lock_irqsave(&ha->hardware_lock, flags);
3276 ctrl_status = readw(&ha->reg->ctrl_status);
3277 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
3278 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3279 readl(&ha->reg->ctrl_status);
3280 }
3281 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3282
3283	/* If the soft reset fails, then most probably the BIOS on the other
3284	 * function is also enabled.
3285	 * Since the initialization is sequential, the other function
3286	 * won't be able to acknowledge the soft reset.
3287	 * Issue a force soft reset to work around this scenario.
3288	 */
3289 if (max_wait_time == 0) {
3290 /* Issue Force Soft Reset */
3291 spin_lock_irqsave(&ha->hardware_lock, flags);
3292 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
3293 readl(&ha->reg->ctrl_status);
3294 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3295 /* Wait until the firmware tells us the Soft Reset is done */
3296 max_wait_time = SOFT_RESET_TOV;
3297 do {
3298 spin_lock_irqsave(&ha->hardware_lock, flags);
3299 ctrl_status = readw(&ha->reg->ctrl_status);
3300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3301
3302 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
3303 status = QLA_SUCCESS;
3304 break;
3305 }
3306
3307 msleep(1000);
3308 } while ((--max_wait_time));
3309 }
3310
3311 return status;
3312 }
3313
3314 /**
3315 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
3316 * @ha: Pointer to host adapter structure.
3317 * @res: returned scsi status
3318 *
3319 * This routine is called just prior to a HARD RESET to return all
3320 * outstanding commands back to the Operating System.
3321 * Caller should make sure that the following locks are released
3322  * before calling this routine: Hardware lock, and io_request_lock.
3323 **/
3324 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
3325 {
3326 struct srb *srb;
3327 int i;
3328 unsigned long flags;
3329
3330 spin_lock_irqsave(&ha->hardware_lock, flags);
3331 for (i = 0; i < ha->host->can_queue; i++) {
3332 srb = qla4xxx_del_from_active_array(ha, i);
3333 if (srb != NULL) {
3334 srb->cmd->result = res;
3335 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3336 }
3337 }
3338 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3339 }
3340
3341 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
3342 {
3343 clear_bit(AF_ONLINE, &ha->flags);
3344
3345 /* Disable the board */
3346 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
3347
3348 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3349 qla4xxx_mark_all_devices_missing(ha);
3350 clear_bit(AF_INIT_DONE, &ha->flags);
3351 }
3352
3353 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
3354 {
3355 struct iscsi_session *sess;
3356 struct ddb_entry *ddb_entry;
3357
3358 sess = cls_session->dd_data;
3359 ddb_entry = sess->dd_data;
3360 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
3361
3362 if (ddb_entry->ddb_type == FLASH_DDB)
3363 iscsi_block_session(ddb_entry->sess);
3364 else
3365 iscsi_session_failure(cls_session->dd_data,
3366 ISCSI_ERR_CONN_FAILED);
3367 }
3368
3369 /**
3370 * qla4xxx_recover_adapter - recovers adapter after a fatal error
3371 * @ha: Pointer to host adapter structure.
3372 **/
3373 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3374 {
3375 int status = QLA_ERROR;
3376 uint8_t reset_chip = 0;
3377 uint32_t dev_state;
3378 unsigned long wait;
3379
3380 /* Stall incoming I/O until we are done */
3381 scsi_block_requests(ha->host);
3382 clear_bit(AF_ONLINE, &ha->flags);
3383 clear_bit(AF_LINK_UP, &ha->flags);
3384
3385 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
3386
3387 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3388
3389 if ((is_qla8032(ha) || is_qla8042(ha)) &&
3390 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3391 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3392 __func__);
3393 /* disable pause frame for ISP83xx */
3394 qla4_83xx_disable_pause(ha);
3395 }
3396
3397 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
3398
3399 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3400 reset_chip = 1;
3401
3402 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
3403 * do not reset adapter, jump to initialize_adapter */
3404 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3405 status = QLA_SUCCESS;
3406 goto recover_ha_init_adapter;
3407 }
3408
3409 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
3410 * from eh_host_reset or ioctl module */
3411 if (is_qla80XX(ha) && !reset_chip &&
3412 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3413
3414 DEBUG2(ql4_printk(KERN_INFO, ha,
3415 "scsi%ld: %s - Performing stop_firmware...\n",
3416 ha->host_no, __func__));
3417 status = ha->isp_ops->reset_firmware(ha);
3418 if (status == QLA_SUCCESS) {
3419 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3420 qla4xxx_cmd_wait(ha);
3421
3422 ha->isp_ops->disable_intrs(ha);
3423 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3424 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3425 } else {
3426 /* If the stop_firmware fails then
3427 * reset the entire chip */
3428 reset_chip = 1;
3429 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3430 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3431 }
3432 }
3433
3434 /* Issue full chip reset if recovering from a catastrophic error,
3435 * or if stop_firmware fails for ISP-8xxx.
3436 * This is the default case for ISP-4xxx */
3437 if (is_qla40XX(ha) || reset_chip) {
3438 if (is_qla40XX(ha))
3439 goto chip_reset;
3440
3441		/* Check if the 8XXX firmware is alive or not.
3442		 * We may have arrived here from NEED_RESET
3443		 * detection only. */
3444 if (test_bit(AF_FW_RECOVERY, &ha->flags))
3445 goto chip_reset;
3446
3447 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
3448 while (time_before(jiffies, wait)) {
3449 if (qla4_8xxx_check_fw_alive(ha)) {
3450 qla4xxx_mailbox_premature_completion(ha);
3451 break;
3452 }
3453
3454 set_current_state(TASK_UNINTERRUPTIBLE);
3455 schedule_timeout(HZ);
3456 }
3457 chip_reset:
3458 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3459 qla4xxx_cmd_wait(ha);
3460
3461 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3462 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3463 DEBUG2(ql4_printk(KERN_INFO, ha,
3464 "scsi%ld: %s - Performing chip reset..\n",
3465 ha->host_no, __func__));
3466 status = ha->isp_ops->reset_chip(ha);
3467 }
3468
3469 /* Flush any pending ddb changed AENs */
3470 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3471
3472 recover_ha_init_adapter:
3473 /* Upon successful firmware/chip reset, re-initialize the adapter */
3474 if (status == QLA_SUCCESS) {
3475 /* For ISP-4xxx, force function 1 to always initialize
3476 * before function 3 to prevent both functions from
3477 * stepping on each other */
3478 if (is_qla40XX(ha) && (ha->mac_index == 3))
3479 ssleep(6);
3480
3481 /* NOTE: AF_ONLINE flag set upon successful completion of
3482 * qla4xxx_initialize_adapter */
3483 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3484 }
3485
3486 /* Retry failed adapter initialization, if necessary.
3487 * Do not retry initialize_adapter for the RESET_HA_INTR (ISP-4xxx
3488 * specific) case to prevent ping-pong resets between functions */
3489 if (!test_bit(AF_ONLINE, &ha->flags) &&
3490 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3491 /* Adapter initialization failed; see if we can retry
3492 * resetting the ha.
3493 * Since we don't want to block the DPC for too long
3494 * with multiple resets in the same thread,
3495 * use the DPC to retry */
3496 if (is_qla80XX(ha)) {
3497 ha->isp_ops->idc_lock(ha);
3498 dev_state = qla4_8xxx_rd_direct(ha,
3499 QLA8XXX_CRB_DEV_STATE);
3500 ha->isp_ops->idc_unlock(ha);
3501 if (dev_state == QLA8XXX_DEV_FAILED) {
3502 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3503 "recover adapter. H/W is in Failed "
3504 "state\n", __func__);
3505 qla4xxx_dead_adapter_cleanup(ha);
3506 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3507 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3508 clear_bit(DPC_RESET_HA_FW_CONTEXT,
3509 &ha->dpc_flags);
3510 status = QLA_ERROR;
3511
3512 goto exit_recover;
3513 }
3514 }
3515
3516 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3517 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3518 DEBUG2(printk("scsi%ld: recover adapter - retrying "
3519 "(%d) more times\n", ha->host_no,
3520 ha->retry_reset_ha_cnt));
3521 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3522 status = QLA_ERROR;
3523 } else {
3524 if (ha->retry_reset_ha_cnt > 0) {
3525 /* Schedule another Reset HA--DPC will retry */
3526 ha->retry_reset_ha_cnt--;
3527 DEBUG2(printk("scsi%ld: recover adapter - "
3528 "retry remaining %d\n",
3529 ha->host_no,
3530 ha->retry_reset_ha_cnt));
3531 status = QLA_ERROR;
3532 }
3533
3534 if (ha->retry_reset_ha_cnt == 0) {
3535 /* Recover adapter retries have been exhausted.
3536 * Adapter DEAD */
3537 DEBUG2(printk("scsi%ld: recover adapter "
3538 "failed - board disabled\n",
3539 ha->host_no));
3540 qla4xxx_dead_adapter_cleanup(ha);
3541 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3542 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3543 clear_bit(DPC_RESET_HA_FW_CONTEXT,
3544 &ha->dpc_flags);
3545 status = QLA_ERROR;
3546 }
3547 }
3548 } else {
3549 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3550 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3551 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3552 }
3553
3554 exit_recover:
3555 ha->adapter_error_count++;
3556
3557 if (test_bit(AF_ONLINE, &ha->flags))
3558 ha->isp_ops->enable_intrs(ha);
3559
3560 scsi_unblock_requests(ha->host);
3561
3562 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3563 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3564 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3565
3566 return status;
3567 }
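/*
 * Illustrative sketch (not part of the driver source): recovery is normally
 * requested asynchronously rather than by calling qla4xxx_recover_adapter()
 * directly. A hypothetical error path would set the appropriate dpc_flags
 * bit and wake the DPC, which then performs the recovery from process
 * context, e.g.:
 *
 *	set_bit(DPC_RESET_HA, &ha->dpc_flags);
 *	qla4xxx_wake_dpc(ha);
 *
 * See qla4xxx_do_dpc() below for how DPC_RESET_HA and the related flags are
 * consumed.
 */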
3568
3569 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3570 {
3571 struct iscsi_session *sess;
3572 struct ddb_entry *ddb_entry;
3573 struct scsi_qla_host *ha;
3574
3575 sess = cls_session->dd_data;
3576 ddb_entry = sess->dd_data;
3577 ha = ddb_entry->ha;
3578 if (!iscsi_is_session_online(cls_session)) {
3579 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3580 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3581 " unblock session\n", ha->host_no, __func__,
3582 ddb_entry->fw_ddb_index);
3583 iscsi_unblock_session(ddb_entry->sess);
3584 } else {
3585 /* Trigger relogin */
3586 if (ddb_entry->ddb_type == FLASH_DDB) {
3587 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
3588 test_bit(DF_DISABLE_RELOGIN,
3589 &ddb_entry->flags)))
3590 qla4xxx_arm_relogin_timer(ddb_entry);
3591 } else
3592 iscsi_session_failure(cls_session->dd_data,
3593 ISCSI_ERR_CONN_FAILED);
3594 }
3595 }
3596 }
3597
3598 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3599 {
3600 struct iscsi_session *sess;
3601 struct ddb_entry *ddb_entry;
3602 struct scsi_qla_host *ha;
3603
3604 sess = cls_session->dd_data;
3605 ddb_entry = sess->dd_data;
3606 ha = ddb_entry->ha;
3607 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3608 " unblock session\n", ha->host_no, __func__,
3609 ddb_entry->fw_ddb_index);
3610
3611 iscsi_unblock_session(ddb_entry->sess);
3612
3613 /* Start scan target */
3614 if (test_bit(AF_ONLINE, &ha->flags)) {
3615 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3616 " start scan\n", ha->host_no, __func__,
3617 ddb_entry->fw_ddb_index);
3618 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3619 }
3620 return QLA_SUCCESS;
3621 }
3622
3623 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3624 {
3625 struct iscsi_session *sess;
3626 struct ddb_entry *ddb_entry;
3627 struct scsi_qla_host *ha;
3628 int status = QLA_SUCCESS;
3629
3630 sess = cls_session->dd_data;
3631 ddb_entry = sess->dd_data;
3632 ha = ddb_entry->ha;
3633 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3634 " unblock user space session\n", ha->host_no, __func__,
3635 ddb_entry->fw_ddb_index);
3636
3637 if (!iscsi_is_session_online(cls_session)) {
3638 iscsi_conn_start(ddb_entry->conn);
3639 iscsi_conn_login_event(ddb_entry->conn,
3640 ISCSI_CONN_STATE_LOGGED_IN);
3641 } else {
3642 ql4_printk(KERN_INFO, ha,
3643 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3644 ha->host_no, __func__, ddb_entry->fw_ddb_index,
3645 cls_session->sid);
3646 status = QLA_ERROR;
3647 }
3648
3649 return status;
3650 }
3651
3652 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3653 {
3654 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3655 }
3656
3657 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3658 {
3659 uint16_t relogin_timer;
3660 struct iscsi_session *sess;
3661 struct ddb_entry *ddb_entry;
3662 struct scsi_qla_host *ha;
3663
3664 sess = cls_sess->dd_data;
3665 ddb_entry = sess->dd_data;
3666 ha = ddb_entry->ha;
3667
3668 relogin_timer = max(ddb_entry->default_relogin_timeout,
3669 (uint16_t)RELOGIN_TOV);
3670 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3671
3672 DEBUG2(ql4_printk(KERN_INFO, ha,
3673 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3674 ddb_entry->fw_ddb_index, relogin_timer));
3675
3676 qla4xxx_login_flash_ddb(cls_sess);
3677 }
3678
3679 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3680 {
3681 struct iscsi_session *sess;
3682 struct ddb_entry *ddb_entry;
3683 struct scsi_qla_host *ha;
3684
3685 sess = cls_sess->dd_data;
3686 ddb_entry = sess->dd_data;
3687 ha = ddb_entry->ha;
3688
3689 if (ddb_entry->ddb_type != FLASH_DDB)
3690 return;
3691
3692 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
3693 return;
3694
3695 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3696 !iscsi_is_session_online(cls_sess)) {
3697 DEBUG2(ql4_printk(KERN_INFO, ha,
3698 "relogin issued\n"));
3699 qla4xxx_relogin_flash_ddb(cls_sess);
3700 }
3701 }
3702
3703 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3704 {
3705 if (ha->dpc_thread)
3706 queue_work(ha->dpc_thread, &ha->dpc_work);
3707 }
3708
3709 static struct qla4_work_evt *
3710 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3711 enum qla4_work_type type)
3712 {
3713 struct qla4_work_evt *e;
3714 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3715
3716 e = kzalloc(size, GFP_ATOMIC);
3717 if (!e)
3718 return NULL;
3719
3720 INIT_LIST_HEAD(&e->list);
3721 e->type = type;
3722 return e;
3723 }
3724
3725 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3726 struct qla4_work_evt *e)
3727 {
3728 unsigned long flags;
3729
3730 spin_lock_irqsave(&ha->work_lock, flags);
3731 list_add_tail(&e->list, &ha->work_list);
3732 spin_unlock_irqrestore(&ha->work_lock, flags);
3733 qla4xxx_wake_dpc(ha);
3734 }
3735
3736 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3737 enum iscsi_host_event_code aen_code,
3738 uint32_t data_size, uint8_t *data)
3739 {
3740 struct qla4_work_evt *e;
3741
3742 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3743 if (!e)
3744 return QLA_ERROR;
3745
3746 e->u.aen.code = aen_code;
3747 e->u.aen.data_size = data_size;
3748 memcpy(e->u.aen.data, data, data_size);
3749
3750 qla4xxx_post_work(ha, e);
3751
3752 return QLA_SUCCESS;
3753 }
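/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * qla4xxx_alloc_work() uses GFP_ATOMIC, so AEN work may be posted from
 * interrupt context; the event is later delivered to user space from the
 * DPC via qla4xxx_do_work(). Assuming a 32-bit payload in "data" and the
 * ISCSI_EVENT_LINKUP code from enum iscsi_host_event_code:
 *
 *	uint32_t data = 0;
 *
 *	qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
 *			      sizeof(data), (uint8_t *)&data);
 */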
3754
3755 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3756 uint32_t status, uint32_t pid,
3757 uint32_t data_size, uint8_t *data)
3758 {
3759 struct qla4_work_evt *e;
3760
3761 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3762 if (!e)
3763 return QLA_ERROR;
3764
3765 e->u.ping.status = status;
3766 e->u.ping.pid = pid;
3767 e->u.ping.data_size = data_size;
3768 memcpy(e->u.ping.data, data, data_size);
3769
3770 qla4xxx_post_work(ha, e);
3771
3772 return QLA_SUCCESS;
3773 }
3774
3775 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3776 {
3777 struct qla4_work_evt *e, *tmp;
3778 unsigned long flags;
3779 LIST_HEAD(work);
3780
3781 spin_lock_irqsave(&ha->work_lock, flags);
3782 list_splice_init(&ha->work_list, &work);
3783 spin_unlock_irqrestore(&ha->work_lock, flags);
3784
3785 list_for_each_entry_safe(e, tmp, &work, list) {
3786 list_del_init(&e->list);
3787
3788 switch (e->type) {
3789 case QLA4_EVENT_AEN:
3790 iscsi_post_host_event(ha->host_no,
3791 &qla4xxx_iscsi_transport,
3792 e->u.aen.code,
3793 e->u.aen.data_size,
3794 e->u.aen.data);
3795 break;
3796 case QLA4_EVENT_PING_STATUS:
3797 iscsi_ping_comp_event(ha->host_no,
3798 &qla4xxx_iscsi_transport,
3799 e->u.ping.status,
3800 e->u.ping.pid,
3801 e->u.ping.data_size,
3802 e->u.ping.data);
3803 break;
3804 default:
3805 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3806 "supported", e->type);
3807 }
3808 kfree(e);
3809 }
3810 }
3811
3812 /**
3813 * qla4xxx_do_dpc - dpc routine
3814 * @work: pointer to the work_struct embedded in the adapter structure
3815 *
3816 * This routine is a task that is scheduled by the interrupt handler
3817 * to perform the background processing for interrupts. We put it
3818 * on a task queue that is consumed whenever the scheduler runs; that's
3819 * so it can do anything (e.g. put the process to sleep). In fact,
3820 * the mid-level tries to sleep when it reaches the driver threshold
3821 * "host->can_queue". This can cause a panic if we were in our interrupt code.
3822 **/
3823 static void qla4xxx_do_dpc(struct work_struct *work)
3824 {
3825 struct scsi_qla_host *ha =
3826 container_of(work, struct scsi_qla_host, dpc_work);
3827 int status = QLA_ERROR;
3828
3829 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
3830 " flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3831 ha->host_no, __func__, ha->flags, ha->dpc_flags));
3832
3833 /* Initialization not yet finished. Don't do anything yet. */
3834 if (!test_bit(AF_INIT_DONE, &ha->flags))
3835 return;
3836
3837 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3838 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3839 ha->host_no, __func__, ha->flags));
3840 return;
3841 }
3842
3843 /* post events to application */
3844 qla4xxx_do_work(ha);
3845
3846 if (is_qla80XX(ha)) {
3847 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3848 if (is_qla8032(ha) || is_qla8042(ha)) {
3849 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3850 __func__);
3851 /* disable pause frame for ISP83xx */
3852 qla4_83xx_disable_pause(ha);
3853 }
3854
3855 ha->isp_ops->idc_lock(ha);
3856 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3857 QLA8XXX_DEV_FAILED);
3858 ha->isp_ops->idc_unlock(ha);
3859 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3860 qla4_8xxx_device_state_handler(ha);
3861 }
3862
3863 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
3864 if (is_qla8042(ha)) {
3865 if (ha->idc_info.info2 &
3866 ENABLE_INTERNAL_LOOPBACK) {
3867 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
3868 __func__);
3869 status = qla4_84xx_config_acb(ha,
3870 ACB_CONFIG_DISABLE);
3871 if (status != QLA_SUCCESS) {
3872 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
3873 __func__);
3874 }
3875 }
3876 }
3877 qla4_83xx_post_idc_ack(ha);
3878 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
3879 }
3880
3881 if (is_qla8042(ha) &&
3882 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
3883 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
3884 __func__);
3885 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
3886 QLA_SUCCESS) {
3887 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
3888 __func__);
3889 }
3890 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
3891 }
3892
3893 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3894 qla4_8xxx_need_qsnt_handler(ha);
3895 }
3896 }
3897
3898 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3899 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3900 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3901 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3902 if ((is_qla8022(ha) && ql4xdontresethba) ||
3903 ((is_qla8032(ha) || is_qla8042(ha)) &&
3904 qla4_83xx_idc_dontreset(ha))) {
3905 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3906 ha->host_no, __func__));
3907 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3908 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3909 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3910 goto dpc_post_reset_ha;
3911 }
3912 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3913 test_bit(DPC_RESET_HA, &ha->dpc_flags))
3914 qla4xxx_recover_adapter(ha);
3915
3916 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3917 uint8_t wait_time = RESET_INTR_TOV;
3918
3919 while ((readw(&ha->reg->ctrl_status) &
3920 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3921 if (--wait_time == 0)
3922 break;
3923 msleep(1000);
3924 }
3925 if (wait_time == 0)
3926 DEBUG2(printk("scsi%ld: %s: SR|FSR "
3927 "bit not cleared-- resetting\n",
3928 ha->host_no, __func__));
3929 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3930 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3931 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3932 status = qla4xxx_recover_adapter(ha);
3933 }
3934 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3935 if (status == QLA_SUCCESS)
3936 ha->isp_ops->enable_intrs(ha);
3937 }
3938 }
3939
3940 dpc_post_reset_ha:
3941 /* ---- process AEN? --- */
3942 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3943 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3944
3945 /* ---- Get DHCP IP Address? --- */
3946 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3947 qla4xxx_get_dhcp_ip_address(ha);
3948
3949 /* ---- relogin device? --- */
3950 if (adapter_up(ha) &&
3951 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3952 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3953 }
3954
3955 /* ---- link change? --- */
3956 if (!test_bit(AF_LOOPBACK, &ha->flags) &&
3957 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3958 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3959 /* ---- link down? --- */
3960 qla4xxx_mark_all_devices_missing(ha);
3961 } else {
3962 /* ---- link up? --- *
3963 * F/W will auto login to all devices ONLY ONCE after
3964 * link up during driver initialization and runtime
3965 * fatal error recovery. Therefore, the driver must
3966 * manually relogin to devices when recovering from
3967 * connection failures, logouts, expired KATO, etc. */
3968 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3969 qla4xxx_build_ddb_list(ha, ha->is_reset);
3970 iscsi_host_for_each_session(ha->host,
3971 qla4xxx_login_flash_ddb);
3972 } else
3973 qla4xxx_relogin_all_devices(ha);
3974 }
3975 }
3976 }
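/*
 * Illustrative sketch (assumption: the wiring below happens in the adapter
 * probe path, which is outside this excerpt). ha->dpc_work is expected to be
 * initialized with this handler and ha->dpc_thread to be a dedicated
 * workqueue, so that qla4xxx_wake_dpc() can defer work to process context:
 *
 *	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
 *	ha->dpc_thread = create_singlethread_workqueue(wq_name);
 *
 * where wq_name is a per-host name string (hypothetical here).
 */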
3977
3978 /**
3979 * qla4xxx_free_adapter - release the adapter
3980 * @ha: pointer to adapter structure
3981 **/
3982 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3983 {
3984 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3985
3986 /* Turn-off interrupts on the card. */
3987 ha->isp_ops->disable_intrs(ha);
3988
3989 if (is_qla40XX(ha)) {
3990 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3991 &ha->reg->ctrl_status);
3992 readl(&ha->reg->ctrl_status);
3993 } else if (is_qla8022(ha)) {
3994 writel(0, &ha->qla4_82xx_reg->host_int);
3995 readl(&ha->qla4_82xx_reg->host_int);
3996 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3997 writel(0, &ha->qla4_83xx_reg->risc_intr);
3998 readl(&ha->qla4_83xx_reg->risc_intr);
3999 }
4000
4001 /* Remove timer thread, if present */
4002 if (ha->timer_active)
4003 qla4xxx_stop_timer(ha);
4004
4005 /* Destroy the DPC workqueue for this host */
4006 if (ha->dpc_thread)
4007 destroy_workqueue(ha->dpc_thread);
4008
4009 /* Destroy the task workqueue for this host */
4010 if (ha->task_wq)
4011 destroy_workqueue(ha->task_wq);
4012
4013 /* Put firmware in known state */
4014 ha->isp_ops->reset_firmware(ha);
4015
4016 if (is_qla80XX(ha)) {
4017 ha->isp_ops->idc_lock(ha);
4018 qla4_8xxx_clear_drv_active(ha);
4019 ha->isp_ops->idc_unlock(ha);
4020 }
4021
4022 /* Detach interrupts */
4023 qla4xxx_free_irqs(ha);
4024
4025 /* free extra memory */
4026 qla4xxx_mem_free(ha);
4027 }
4028
4029 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
4030 {
4031 int status = 0;
4032 unsigned long mem_base, mem_len, db_base, db_len;
4033 struct pci_dev *pdev = ha->pdev;
4034
4035 status = pci_request_regions(pdev, DRIVER_NAME);
4036 if (status) {
4037 printk(KERN_WARNING
4038 "scsi(%ld) Failed to reserve PIO regions (%s) "
4039 "status=%d\n", ha->host_no, pci_name(pdev), status);
4040 goto iospace_error_exit;
4041 }
4042
4043 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
4044 __func__, pdev->revision));
4045 ha->revision_id = pdev->revision;
4046
4047 /* remap phys address */
4048 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
4049 mem_len = pci_resource_len(pdev, 0);
4050 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
4051 __func__, mem_base, mem_len));
4052
4053 /* mapping of pcibase pointer */
4054 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
4055 if (!ha->nx_pcibase) {
4056 printk(KERN_ERR
4057 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
4058 pci_release_regions(ha->pdev);
4059 goto iospace_error_exit;
4060 }
4061
4062 /* Mapping of IO base pointer, doorbell read and write pointers */
4063
4064 /* mapping of IO base pointer */
4065 if (is_qla8022(ha)) {
4066 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
4067 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
4068 (ha->pdev->devfn << 11));
4069 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
4070 QLA82XX_CAM_RAM_DB2);
4071 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4072 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
4073 ((uint8_t *)ha->nx_pcibase);
4074 }
4075
4076 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
4077 db_len = pci_resource_len(pdev, 4);
4078
4079 return 0;
4080 iospace_error_exit:
4081 return -ENOMEM;
4082 }
4083
4084 /**
4085 * qla4xxx_iospace_config - maps registers
4086 * @ha: pointer to adapter structure
4087 *
4088 * This routine maps the HBA's registers from the PCI address space
4089 * into the kernel virtual address space for memory-mapped I/O.
4090 **/
4091 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
4092 {
4093 unsigned long pio, pio_len, pio_flags;
4094 unsigned long mmio, mmio_len, mmio_flags;
4095
4096 pio = pci_resource_start(ha->pdev, 0);
4097 pio_len = pci_resource_len(ha->pdev, 0);
4098 pio_flags = pci_resource_flags(ha->pdev, 0);
4099 if (pio_flags & IORESOURCE_IO) {
4100 if (pio_len < MIN_IOBASE_LEN) {
4101 ql4_printk(KERN_WARNING, ha,
4102 "Invalid PCI I/O region size\n");
4103 pio = 0;
4104 }
4105 } else {
4106 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
4107 pio = 0;
4108 }
4109
4110 /* Use MMIO operations for all accesses. */
4111 mmio = pci_resource_start(ha->pdev, 1);
4112 mmio_len = pci_resource_len(ha->pdev, 1);
4113 mmio_flags = pci_resource_flags(ha->pdev, 1);
4114
4115 if (!(mmio_flags & IORESOURCE_MEM)) {
4116 ql4_printk(KERN_ERR, ha,
4117 "region #0 not an MMIO resource, aborting\n");
4118
4119 goto iospace_error_exit;
4120 }
4121
4122 if (mmio_len < MIN_IOBASE_LEN) {
4123 ql4_printk(KERN_ERR, ha,
4124 "Invalid PCI mem region size, aborting\n");
4125 goto iospace_error_exit;
4126 }
4127
4128 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
4129 ql4_printk(KERN_WARNING, ha,
4130 "Failed to reserve PIO/MMIO regions\n");
4131
4132 goto iospace_error_exit;
4133 }
4134
4135 ha->pio_address = pio;
4136 ha->pio_length = pio_len;
4137 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
4138 if (!ha->reg) {
4139 ql4_printk(KERN_ERR, ha,
4140 "cannot remap MMIO, aborting\n");
4141
4142 goto iospace_error_exit;
4143 }
4144
4145 return 0;
4146
4147 iospace_error_exit:
4148 return -ENOMEM;
4149 }
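/*
 * Illustrative sketch (not part of the driver source): once ha->reg is
 * mapped above, ISP-4xxx register access elsewhere in the driver goes
 * through readw()/writel() on that mapping, e.g. polling the control/status
 * register as qla4xxx_do_dpc() does:
 *
 *	if (readw(&ha->reg->ctrl_status) &
 *	    (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET))
 *		msleep(1000);
 */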
4150
4151 static struct isp_operations qla4xxx_isp_ops = {
4152 .iospace_config = qla4xxx_iospace_config,
4153 .pci_config = qla4xxx_pci_config,
4154 .disable_intrs = qla4xxx_disable_intrs,
4155 .enable_intrs = qla4xxx_enable_intrs,
4156 .start_firmware = qla4xxx_start_firmware,
4157 .intr_handler = qla4xxx_intr_handler,
4158 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
4159 .reset_chip = qla4xxx_soft_reset,
4160 .reset_firmware = qla4xxx_hw_reset,
4161 .queue_iocb = qla4xxx_queue_iocb,
4162 .complete_iocb = qla4xxx_complete_iocb,
4163 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
4164 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
4165 .get_sys_info = qla4xxx_get_sys_info,
4166 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
4167 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
4168 };
4169
4170 static struct isp_operations qla4_82xx_isp_ops = {
4171 .iospace_config = qla4_8xxx_iospace_config,
4172 .pci_config = qla4_8xxx_pci_config,
4173 .disable_intrs = qla4_82xx_disable_intrs,
4174 .enable_intrs = qla4_82xx_enable_intrs,
4175 .start_firmware = qla4_8xxx_load_risc,
4176 .restart_firmware = qla4_82xx_try_start_fw,
4177 .intr_handler = qla4_82xx_intr_handler,
4178 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
4179 .need_reset = qla4_8xxx_need_reset,
4180 .reset_chip = qla4_82xx_isp_reset,
4181 .reset_firmware = qla4_8xxx_stop_firmware,
4182 .queue_iocb = qla4_82xx_queue_iocb,
4183 .complete_iocb = qla4_82xx_complete_iocb,
4184 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
4185 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
4186 .get_sys_info = qla4_8xxx_get_sys_info,
4187 .rd_reg_direct = qla4_82xx_rd_32,
4188 .wr_reg_direct = qla4_82xx_wr_32,
4189 .rd_reg_indirect = qla4_82xx_md_rd_32,
4190 .wr_reg_indirect = qla4_82xx_md_wr_32,
4191 .idc_lock = qla4_82xx_idc_lock,
4192 .idc_unlock = qla4_82xx_idc_unlock,
4193 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
4194 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
4195 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
4196 };
4197
4198 static struct isp_operations qla4_83xx_isp_ops = {
4199 .iospace_config = qla4_8xxx_iospace_config,
4200 .pci_config = qla4_8xxx_pci_config,
4201 .disable_intrs = qla4_83xx_disable_intrs,
4202 .enable_intrs = qla4_83xx_enable_intrs,
4203 .start_firmware = qla4_8xxx_load_risc,
4204 .restart_firmware = qla4_83xx_start_firmware,
4205 .intr_handler = qla4_83xx_intr_handler,
4206 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
4207 .need_reset = qla4_8xxx_need_reset,
4208 .reset_chip = qla4_83xx_isp_reset,
4209 .reset_firmware = qla4_8xxx_stop_firmware,
4210 .queue_iocb = qla4_83xx_queue_iocb,
4211 .complete_iocb = qla4_83xx_complete_iocb,
4212 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
4213 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
4214 .get_sys_info = qla4_8xxx_get_sys_info,
4215 .rd_reg_direct = qla4_83xx_rd_reg,
4216 .wr_reg_direct = qla4_83xx_wr_reg,
4217 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
4218 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
4219 .idc_lock = qla4_83xx_drv_lock,
4220 .idc_unlock = qla4_83xx_drv_unlock,
4221 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
4222 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
4223 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
4224 };
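/*
 * Illustrative sketch (not part of the driver source): the isp_operations
 * tables above let chip-independent code dispatch to the right ISP-4xxx,
 * ISP-82xx or ISP-83xx/84xx implementation through a single pointer, e.g.:
 *
 *	ha->isp_ops->disable_intrs(ha);
 *	status = ha->isp_ops->reset_chip(ha);
 *
 * as qla4xxx_recover_adapter() does; the table itself is selected per
 * device ID during probe (outside this excerpt).
 */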
4225
4226 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4227 {
4228 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
4229 }
4230
4231 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4232 {
4233 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
4234 }
4235
4236 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4237 {
4238 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
4239 }
4240
4241 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4242 {
4243 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
4244 }
4245
4246 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
4247 {
4248 struct scsi_qla_host *ha = data;
4249 char *str = buf;
4250 int rc;
4251
4252 switch (type) {
4253 case ISCSI_BOOT_ETH_FLAGS:
4254 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4255 break;
4256 case ISCSI_BOOT_ETH_INDEX:
4257 rc = sprintf(str, "0\n");
4258 break;
4259 case ISCSI_BOOT_ETH_MAC:
4260 rc = sysfs_format_mac(str, ha->my_mac,
4261 MAC_ADDR_LEN);
4262 break;
4263 default:
4264 rc = -ENOSYS;
4265 break;
4266 }
4267 return rc;
4268 }
4269
4270 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
4271 {
4272 int rc;
4273
4274 switch (type) {
4275 case ISCSI_BOOT_ETH_FLAGS:
4276 case ISCSI_BOOT_ETH_MAC:
4277 case ISCSI_BOOT_ETH_INDEX:
4278 rc = S_IRUGO;
4279 break;
4280 default:
4281 rc = 0;
4282 break;
4283 }
4284 return rc;
4285 }
4286
4287 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
4288 {
4289 struct scsi_qla_host *ha = data;
4290 char *str = buf;
4291 int rc;
4292
4293 switch (type) {
4294 case ISCSI_BOOT_INI_INITIATOR_NAME:
4295 rc = sprintf(str, "%s\n", ha->name_string);
4296 break;
4297 default:
4298 rc = -ENOSYS;
4299 break;
4300 }
4301 return rc;
4302 }
4303
4304 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
4305 {
4306 int rc;
4307
4308 switch (type) {
4309 case ISCSI_BOOT_INI_INITIATOR_NAME:
4310 rc = S_IRUGO;
4311 break;
4312 default:
4313 rc = 0;
4314 break;
4315 }
4316 return rc;
4317 }
4318
4319 static ssize_t
4320 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
4321 char *buf)
4322 {
4323 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4324 char *str = buf;
4325 int rc;
4326
4327 switch (type) {
4328 case ISCSI_BOOT_TGT_NAME:
4329 rc = sprintf(str, "%s\n", (char *)&boot_sess->target_name);
4330 break;
4331 case ISCSI_BOOT_TGT_IP_ADDR:
4332 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
4333 rc = sprintf(str, "%pI4\n",
4334 &boot_conn->dest_ipaddr.ip_address);
4335 else
4336 rc = sprintf(str, "%pI6\n",
4337 &boot_conn->dest_ipaddr.ip_address);
4338 break;
4339 case ISCSI_BOOT_TGT_PORT:
4340 rc = sprintf(str, "%d\n", boot_conn->dest_port);
4341 break;
4342 case ISCSI_BOOT_TGT_CHAP_NAME:
4343 rc = sprintf(str, "%.*s\n",
4344 boot_conn->chap.target_chap_name_length,
4345 (char *)&boot_conn->chap.target_chap_name);
4346 break;
4347 case ISCSI_BOOT_TGT_CHAP_SECRET:
4348 rc = sprintf(str, "%.*s\n",
4349 boot_conn->chap.target_secret_length,
4350 (char *)&boot_conn->chap.target_secret);
4351 break;
4352 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4353 rc = sprintf(str, "%.*s\n",
4354 boot_conn->chap.intr_chap_name_length,
4355 (char *)&boot_conn->chap.intr_chap_name);
4356 break;
4357 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4358 rc = sprintf(str, "%.*s\n",
4359 boot_conn->chap.intr_secret_length,
4360 (char *)&boot_conn->chap.intr_secret);
4361 break;
4362 case ISCSI_BOOT_TGT_FLAGS:
4363 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4364 break;
4365 case ISCSI_BOOT_TGT_NIC_ASSOC:
4366 rc = sprintf(str, "0\n");
4367 break;
4368 default:
4369 rc = -ENOSYS;
4370 break;
4371 }
4372 return rc;
4373 }
4374
4375 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
4376 {
4377 struct scsi_qla_host *ha = data;
4378 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
4379
4380 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4381 }
4382
4383 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
4384 {
4385 struct scsi_qla_host *ha = data;
4386 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
4387
4388 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4389 }
4390
4391 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
4392 {
4393 int rc;
4394
4395 switch (type) {
4396 case ISCSI_BOOT_TGT_NAME:
4397 case ISCSI_BOOT_TGT_IP_ADDR:
4398 case ISCSI_BOOT_TGT_PORT:
4399 case ISCSI_BOOT_TGT_CHAP_NAME:
4400 case ISCSI_BOOT_TGT_CHAP_SECRET:
4401 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4402 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4403 case ISCSI_BOOT_TGT_NIC_ASSOC:
4404 case ISCSI_BOOT_TGT_FLAGS:
4405 rc = S_IRUGO;
4406 break;
4407 default:
4408 rc = 0;
4409 break;
4410 }
4411 return rc;
4412 }
4413
4414 static void qla4xxx_boot_release(void *data)
4415 {
4416 struct scsi_qla_host *ha = data;
4417
4418 scsi_host_put(ha->host);
4419 }
4420
4421 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4422 {
4423 dma_addr_t buf_dma;
4424 uint32_t addr, pri_addr, sec_addr;
4425 uint32_t offset;
4426 uint16_t func_num;
4427 uint8_t val;
4428 uint8_t *buf = NULL;
4429 size_t size = 13 * sizeof(uint8_t);
4430 int ret = QLA_SUCCESS;
4431
4432 func_num = PCI_FUNC(ha->pdev->devfn);
4433
4434 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
4435 __func__, ha->pdev->device, func_num);
4436
4437 if (is_qla40XX(ha)) {
4438 if (func_num == 1) {
4439 addr = NVRAM_PORT0_BOOT_MODE;
4440 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
4441 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
4442 } else if (func_num == 3) {
4443 addr = NVRAM_PORT1_BOOT_MODE;
4444 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
4445 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
4446 } else {
4447 ret = QLA_ERROR;
4448 goto exit_boot_info;
4449 }
4450
4451 /* Check Boot Mode */
4452 val = rd_nvram_byte(ha, addr);
4453 if (!(val & 0x07)) {
4454 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
4455 "options : 0x%x\n", __func__, val));
4456 ret = QLA_ERROR;
4457 goto exit_boot_info;
4458 }
4459
4460 /* get primary valid target index */
4461 val = rd_nvram_byte(ha, pri_addr);
4462 if (val & BIT_7)
4463 ddb_index[0] = (val & 0x7f);
4464
4465 /* get secondary valid target index */
4466 val = rd_nvram_byte(ha, sec_addr);
4467 if (val & BIT_7)
4468 ddb_index[1] = (val & 0x7f);
4469
4470 } else if (is_qla80XX(ha)) {
4471 buf = dma_alloc_coherent(&ha->pdev->dev, size,
4472 &buf_dma, GFP_KERNEL);
4473 if (!buf) {
4474 DEBUG2(ql4_printk(KERN_ERR, ha,
4475 "%s: Unable to allocate dma buffer\n",
4476 __func__));
4477 ret = QLA_ERROR;
4478 goto exit_boot_info;
4479 }
4480
4481 if (ha->port_num == 0)
4482 offset = BOOT_PARAM_OFFSET_PORT0;
4483 else if (ha->port_num == 1)
4484 offset = BOOT_PARAM_OFFSET_PORT1;
4485 else {
4486 ret = QLA_ERROR;
4487 goto exit_boot_info_free;
4488 }
4489 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
4490 offset;
4491 if (qla4xxx_get_flash(ha, buf_dma, addr,
4492 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
4493 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
4494 " failed\n", ha->host_no, __func__));
4495 ret = QLA_ERROR;
4496 goto exit_boot_info_free;
4497 }
4498 /* Check Boot Mode */
4499 if (!(buf[1] & 0x07)) {
4500 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
4501 " : 0x%x\n", buf[1]));
4502 ret = QLA_ERROR;
4503 goto exit_boot_info_free;
4504 }
4505
4506 /* get primary valid target index */
4507 if (buf[2] & BIT_7)
4508 ddb_index[0] = buf[2] & 0x7f;
4509
4510 /* get secondary valid target index */
4511 if (buf[11] & BIT_7)
4512 ddb_index[1] = buf[11] & 0x7f;
4513 } else {
4514 ret = QLA_ERROR;
4515 goto exit_boot_info;
4516 }
4517
4518 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
4519 " target ID %d\n", __func__, ddb_index[0],
4520 ddb_index[1]));
4521
4522 exit_boot_info_free:
4523 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
4524 exit_boot_info:
4525 ha->pri_ddb_idx = ddb_index[0];
4526 ha->sec_ddb_idx = ddb_index[1];
4527 return ret;
4528 }
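/*
 * Worked example (derived from the checks above, values hypothetical): for
 * ISP-4xxx the per-port NVRAM boot bytes are decoded as
 *
 *	val = rd_nvram_byte(ha, pri_addr);	e.g. val == 0x83
 *	val & BIT_7				-> boot target entry is valid
 *	val & 0x7f				-> ddb_index[0] == 0x03
 *
 * i.e. bit 7 is a "valid" flag and the low 7 bits are the DDB index of the
 * boot target; ISP-8xxx uses the same encoding in bytes 2 and 11 of the
 * flash boot-parameter block read above.
 */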
4529
4530 /**
4531 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4532 * @ha: pointer to adapter structure
4533 * @username: CHAP username to be returned
4534 * @password: CHAP password to be returned
4535 *
4536 * If a boot entry has BIDI CHAP enabled, then the BIDI CHAP user and
4537 * password must be set in the sysfs entry under /sys/firmware/iscsi_boot#/.
4538 * So find the first BIDI CHAP entry in the CHAP cache and set it
4539 * in the boot record in sysfs.
4540 **/
4541 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4542 char *password)
4543 {
4544 int i, ret = -EINVAL;
4545 int max_chap_entries = 0;
4546 struct ql4_chap_table *chap_table;
4547
4548 if (is_qla80XX(ha))
4549 max_chap_entries = (ha->hw.flt_chap_size / 2) /
4550 sizeof(struct ql4_chap_table);
4551 else
4552 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4553
4554 if (!ha->chap_list) {
4555 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4556 return ret;
4557 }
4558
4559 mutex_lock(&ha->chap_sem);
4560 for (i = 0; i < max_chap_entries; i++) {
4561 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4562 if (chap_table->cookie !=
4563 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4564 continue;
4565 }
4566
4567 if (chap_table->flags & BIT_7) /* local */
4568 continue;
4569
4570 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4571 continue;
4572
4573 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4574 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4575 ret = 0;
4576 break;
4577 }
4578 mutex_unlock(&ha->chap_sem);
4579
4580 return ret;
4581 }
4582
4583
4584 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4585 struct ql4_boot_session_info *boot_sess,
4586 uint16_t ddb_index)
4587 {
4588 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4589 struct dev_db_entry *fw_ddb_entry;
4590 dma_addr_t fw_ddb_entry_dma;
4591 uint16_t idx;
4592 uint16_t options;
4593 int ret = QLA_SUCCESS;
4594
4595 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4596 &fw_ddb_entry_dma, GFP_KERNEL);
4597 if (!fw_ddb_entry) {
4598 DEBUG2(ql4_printk(KERN_ERR, ha,
4599 "%s: Unable to allocate dma buffer.\n",
4600 __func__));
4601 ret = QLA_ERROR;
4602 return ret;
4603 }
4604
4605 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4606 fw_ddb_entry_dma, ddb_index)) {
4607 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4608 "index [%d]\n", __func__, ddb_index));
4609 ret = QLA_ERROR;
4610 goto exit_boot_target;
4611 }
4612
4613 /* Update target name and IP from DDB */
4614 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4615 min(sizeof(boot_sess->target_name),
4616 sizeof(fw_ddb_entry->iscsi_name)));
4617
4618 options = le16_to_cpu(fw_ddb_entry->options);
4619 if (options & DDB_OPT_IPV6_DEVICE) {
4620 memcpy(&boot_conn->dest_ipaddr.ip_address,
4621 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4622 } else {
4623 boot_conn->dest_ipaddr.ip_type = 0x1;
4624 memcpy(&boot_conn->dest_ipaddr.ip_address,
4625 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4626 }
4627
4628 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4629
4630 /* update chap information */
4631 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4632
4633 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4634
4635 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4636
4637 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4638 target_chap_name,
4639 (char *)&boot_conn->chap.target_secret,
4640 idx);
4641 if (ret) {
4642 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4643 ret = QLA_ERROR;
4644 goto exit_boot_target;
4645 }
4646
4647 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4648 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4649 }
4650
4651 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4652
4653 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4654
4655 ret = qla4xxx_get_bidi_chap(ha,
4656 (char *)&boot_conn->chap.intr_chap_name,
4657 (char *)&boot_conn->chap.intr_secret);
4658
4659 if (ret) {
4660 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4661 ret = QLA_ERROR;
4662 goto exit_boot_target;
4663 }
4664
4665 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4666 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4667 }
4668
4669 exit_boot_target:
4670 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4671 fw_ddb_entry, fw_ddb_entry_dma);
4672 return ret;
4673 }
4674
4675 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4676 {
4677 uint16_t ddb_index[2];
4678 int ret = QLA_ERROR;
4679 int rval;
4680
4681 memset(ddb_index, 0, sizeof(ddb_index));
4682 ddb_index[0] = 0xffff;
4683 ddb_index[1] = 0xffff;
4684 ret = get_fw_boot_info(ha, ddb_index);
4685 if (ret != QLA_SUCCESS) {
4686 DEBUG2(ql4_printk(KERN_INFO, ha,
4687 "%s: No boot target configured.\n", __func__));
4688 return ret;
4689 }
4690
4691 if (ql4xdisablesysfsboot)
4692 return QLA_SUCCESS;
4693
4694 if (ddb_index[0] == 0xffff)
4695 goto sec_target;
4696
4697 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4698 ddb_index[0]);
4699 if (rval != QLA_SUCCESS) {
4700 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4701 "configured\n", __func__));
4702 } else
4703 ret = QLA_SUCCESS;
4704
4705 sec_target:
4706 if (ddb_index[1] == 0xffff)
4707 goto exit_get_boot_info;
4708
4709 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4710 ddb_index[1]);
4711 if (rval != QLA_SUCCESS) {
4712 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4713 " configured\n", __func__));
4714 } else
4715 ret = QLA_SUCCESS;
4716
4717 exit_get_boot_info:
4718 return ret;
4719 }
4720
4721 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4722 {
4723 struct iscsi_boot_kobj *boot_kobj;
4724
4725 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4726 return QLA_ERROR;
4727
4728 if (ql4xdisablesysfsboot) {
4729 ql4_printk(KERN_INFO, ha,
4730 "%s: syfsboot disabled - driver will trigger login "
4731 "and publish session for discovery .\n", __func__);
4732 return QLA_SUCCESS;
4733 }
4734
4735
4736 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4737 if (!ha->boot_kset)
4738 goto kset_free;
4739
4740 if (!scsi_host_get(ha->host))
4741 goto kset_free;
4742 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4743 qla4xxx_show_boot_tgt_pri_info,
4744 qla4xxx_tgt_get_attr_visibility,
4745 qla4xxx_boot_release);
4746 if (!boot_kobj)
4747 goto put_host;
4748
4749 if (!scsi_host_get(ha->host))
4750 goto kset_free;
4751 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4752 qla4xxx_show_boot_tgt_sec_info,
4753 qla4xxx_tgt_get_attr_visibility,
4754 qla4xxx_boot_release);
4755 if (!boot_kobj)
4756 goto put_host;
4757
4758 if (!scsi_host_get(ha->host))
4759 goto kset_free;
4760 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4761 qla4xxx_show_boot_ini_info,
4762 qla4xxx_ini_get_attr_visibility,
4763 qla4xxx_boot_release);
4764 if (!boot_kobj)
4765 goto put_host;
4766
4767 if (!scsi_host_get(ha->host))
4768 goto kset_free;
4769 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4770 qla4xxx_show_boot_eth_info,
4771 qla4xxx_eth_get_attr_visibility,
4772 qla4xxx_boot_release);
4773 if (!boot_kobj)
4774 goto put_host;
4775
4776 return QLA_SUCCESS;
4777
4778 put_host:
4779 scsi_host_put(ha->host);
4780 kset_free:
4781 iscsi_boot_destroy_kset(ha->boot_kset);
4782 return -ENOMEM;
4783 }
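/*
 * Note (sketch, not part of the driver source): the four
 * iscsi_boot_create_*() calls above populate the boot kset exported under
 * /sys/firmware/iscsi_boot#/ (see the qla4xxx_get_bidi_chap() comment
 * earlier); each kobject pins the SCSI host via scsi_host_get() and drops
 * that reference through qla4xxx_boot_release() when it is destroyed.
 */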
4784
4785
4786 /**
4787 * qla4xxx_create_chap_list - Create CHAP list from FLASH
4788 * @ha: pointer to adapter structure
4789 *
4790 * Read flash and build a list of CHAP entries. During login, when a CHAP
4791 * entry is received, it is looked up in this list. If the entry exists, the
4792 * CHAP entry index is set in the DDB. If the CHAP entry does not exist in
4793 * this list, then a new entry is added to the CHAP table in FLASH and the
4794 * index obtained is used in the DDB.
4795 **/
4796 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4797 {
4798 int rval = 0;
4799 uint8_t *chap_flash_data = NULL;
4800 uint32_t offset;
4801 dma_addr_t chap_dma;
4802 uint32_t chap_size = 0;
4803
4804 if (is_qla40XX(ha))
4805 chap_size = MAX_CHAP_ENTRIES_40XX *
4806 sizeof(struct ql4_chap_table);
4807 else /* A single region contains CHAP info for both
4808 * ports and is split in half, one half per port.
4809 */
4810 chap_size = ha->hw.flt_chap_size / 2;
4811
4812 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4813 &chap_dma, GFP_KERNEL);
4814 if (!chap_flash_data) {
4815 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4816 return;
4817 }
4818 if (is_qla40XX(ha))
4819 offset = FLASH_CHAP_OFFSET;
4820 else {
4821 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4822 if (ha->port_num == 1)
4823 offset += chap_size;
4824 }
4825
4826 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4827 if (rval != QLA_SUCCESS)
4828 goto exit_chap_list;
4829
4830 if (ha->chap_list == NULL)
4831 ha->chap_list = vmalloc(chap_size);
4832 if (ha->chap_list == NULL) {
4833 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4834 goto exit_chap_list;
4835 }
4836
4837 memcpy(ha->chap_list, chap_flash_data, chap_size);
4838
4839 exit_chap_list:
4840 dma_free_coherent(&ha->pdev->dev, chap_size,
4841 chap_flash_data, chap_dma);
4842 }
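/*
 * Illustrative sketch (not part of the driver source): consumers walk the
 * cached copy under ha->chap_sem and index it as an array of
 * struct ql4_chap_table, the same way qla4xxx_get_bidi_chap() does above:
 *
 *	mutex_lock(&ha->chap_sem);
 *	chap_table = (struct ql4_chap_table *)ha->chap_list + i;
 *	if (chap_table->cookie ==
 *	    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
 *		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
 *	mutex_unlock(&ha->chap_sem);
 */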
4843
4844 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4845 struct ql4_tuple_ddb *tddb)
4846 {
4847 struct scsi_qla_host *ha;
4848 struct iscsi_cls_session *cls_sess;
4849 struct iscsi_cls_conn *cls_conn;
4850 struct iscsi_session *sess;
4851 struct iscsi_conn *conn;
4852
4853 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4854 ha = ddb_entry->ha;
4855 cls_sess = ddb_entry->sess;
4856 sess = cls_sess->dd_data;
4857 cls_conn = ddb_entry->conn;
4858 conn = cls_conn->dd_data;
4859
4860 tddb->tpgt = sess->tpgt;
4861 tddb->port = conn->persistent_port;
4862 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4863 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4864 }
4865
4866 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4867 struct ql4_tuple_ddb *tddb,
4868 uint8_t *flash_isid)
4869 {
4870 uint16_t options = 0;
4871
4872 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4873 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4874 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4875
4876 options = le16_to_cpu(fw_ddb_entry->options);
4877 if (options & DDB_OPT_IPV6_DEVICE)
4878 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4879 else
4880 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4881
4882 tddb->port = le16_to_cpu(fw_ddb_entry->port);
4883
4884 if (flash_isid == NULL)
4885 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4886 sizeof(tddb->isid));
4887 else
4888 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4889 }
4890
4891 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4892 struct ql4_tuple_ddb *old_tddb,
4893 struct ql4_tuple_ddb *new_tddb,
4894 uint8_t is_isid_compare)
4895 {
4896 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4897 return QLA_ERROR;
4898
4899 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4900 return QLA_ERROR;
4901
4902 if (old_tddb->port != new_tddb->port)
4903 return QLA_ERROR;
4904
4905 /* For multiple sessions, the driver generates the ISID, so do not
4906 * compare ISIDs in the reset path since that would compare a
4907 * driver-generated ISID with a firmware-generated one. This could
4908 * lead to duplicate DDBs being added to the list, as the driver-
4909 * generated ISID would not match the firmware-generated ISID.
4910 */
4911 if (is_isid_compare) {
4912 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4913 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4914 __func__, old_tddb->isid[5], old_tddb->isid[4],
4915 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4916 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4917 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4918 new_tddb->isid[0]));
4919
4920 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4921 sizeof(old_tddb->isid)))
4922 return QLA_ERROR;
4923 }
4924
4925 DEBUG2(ql4_printk(KERN_INFO, ha,
4926 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4927 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4928 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4929 new_tddb->ip_addr, new_tddb->iscsi_name));
4930
4931 return QLA_SUCCESS;
4932 }
4933
4934 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4935 struct dev_db_entry *fw_ddb_entry,
4936 uint32_t *index)
4937 {
4938 struct ddb_entry *ddb_entry;
4939 struct ql4_tuple_ddb *fw_tddb = NULL;
4940 struct ql4_tuple_ddb *tmp_tddb = NULL;
4941 int idx;
4942 int ret = QLA_ERROR;
4943
4944 fw_tddb = vzalloc(sizeof(*fw_tddb));
4945 if (!fw_tddb) {
4946 DEBUG2(ql4_printk(KERN_WARNING, ha,
4947 "Memory Allocation failed.\n"));
4948 ret = QLA_SUCCESS;
4949 goto exit_check;
4950 }
4951
4952 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4953 if (!tmp_tddb) {
4954 DEBUG2(ql4_printk(KERN_WARNING, ha,
4955 "Memory Allocation failed.\n"));
4956 ret = QLA_SUCCESS;
4957 goto exit_check;
4958 }
4959
4960 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4961
4962 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4963 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4964 if (ddb_entry == NULL)
4965 continue;
4966
4967 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4968 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4969 ret = QLA_SUCCESS; /* found */
4970 if (index != NULL)
4971 *index = idx;
4972 goto exit_check;
4973 }
4974 }
4975
4976 exit_check:
4977 if (fw_tddb)
4978 vfree(fw_tddb);
4979 if (tmp_tddb)
4980 vfree(tmp_tddb);
4981 return ret;
4982 }
4983
4984 /**
4985 * qla4xxx_check_existing_isid - check if a target with the same ISID
4986 * exists in the target list
4987 * @list_nt: list of targets
4988 * @isid: ISID to check
4989 *
4990 * This routine returns QLA_SUCCESS if a target with the same ISID exists.
4991 **/
4992 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4993 {
4994 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4995 struct dev_db_entry *fw_ddb_entry;
4996
4997 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4998 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4999
5000 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
5001 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
5002 return QLA_SUCCESS;
5003 }
5004 }
5005 return QLA_ERROR;
5006 }
5007
5008 /**
5009 * qla4xxx_update_isid - compare ddbs and update the isid
5010 * @ha: Pointer to host adapter structure.
5011 * @list_nt: list of nt targets
5012 * @fw_ddb_entry: firmware ddb entry
5013 *
5014 * This routine updates the ISID if the ddbs have the same IQN, the same
5015 * ISID and a different IP address.
5016 * Returns QLA_SUCCESS if the ISID is updated.
5017 **/
5018 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
5019 struct list_head *list_nt,
5020 struct dev_db_entry *fw_ddb_entry)
5021 {
5022 uint8_t base_value, i;
5023
5024 base_value = fw_ddb_entry->isid[1] & 0x1f;
5025 for (i = 0; i < 8; i++) {
5026 fw_ddb_entry->isid[1] = (base_value | (i << 5));
5027 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5028 break;
5029 }
5030
5031 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5032 return QLA_ERROR;
5033
5034 return QLA_SUCCESS;
5035 }
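/*
 * Worked example (derived from the code above): only the top three bits of
 * isid[1] are varied, so up to eight candidate ISIDs are tried. With a
 * hypothetical isid[1] of 0x2a:
 *
 *	base_value = 0x2a & 0x1f = 0x0a		(low 5 bits preserved)
 *	i = 0..7: isid[1] = 0x0a | (i << 5)	-> 0x0a, 0x2a, 0x4a, ... 0xea
 *
 * The first candidate not already present in list_nt (as reported by
 * qla4xxx_check_existing_isid()) is kept.
 */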
5036
5037 /**
5038 * qla4xxx_should_update_isid - check if the isid needs to be updated
5039 * @ha: Pointer to host adapter structure.
5040 * @old_tddb: ddb tuple
5041 * @new_tddb: ddb tuple
5042 *
5043 * Returns QLA_SUCCESS if the IP address or port differs while the IQN
5044 * and ISID are the same.
5045 **/
5046 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
5047 struct ql4_tuple_ddb *old_tddb,
5048 struct ql4_tuple_ddb *new_tddb)
5049 {
5050 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
5051 /* Same ip */
5052 if (old_tddb->port == new_tddb->port)
5053 return QLA_ERROR;
5054 }
5055
5056 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5057 /* different iqn */
5058 return QLA_ERROR;
5059
5060 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5061 sizeof(old_tddb->isid)))
5062 /* different isid */
5063 return QLA_ERROR;
5064
5065 return QLA_SUCCESS;
5066 }
5067
5068 /**
5069 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
5070 * @ha: Pointer to host adapter structure.
5071 * @list_nt: list of nt targets.
5072 * @fw_ddb_entry: firmware ddb entry.
5073 *
5074 * This routine checks if fw_ddb_entry already exists in list_nt to avoid
5075 * adding a duplicate ddb to list_nt.
5076 * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
5077 * Note: This function also updates the isid of the DDB if required.
5078 **/
5080 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
5081 struct list_head *list_nt,
5082 struct dev_db_entry *fw_ddb_entry)
5083 {
5084 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
5085 struct ql4_tuple_ddb *fw_tddb = NULL;
5086 struct ql4_tuple_ddb *tmp_tddb = NULL;
5087 int rval, ret = QLA_ERROR;
5088
5089 fw_tddb = vzalloc(sizeof(*fw_tddb));
5090 if (!fw_tddb) {
5091 DEBUG2(ql4_printk(KERN_WARNING, ha,
5092 "Memory Allocation failed.\n"));
5093 ret = QLA_SUCCESS;
5094 goto exit_check;
5095 }
5096
5097 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5098 if (!tmp_tddb) {
5099 DEBUG2(ql4_printk(KERN_WARNING, ha,
5100 "Memory Allocation failed.\n"));
5101 ret = QLA_SUCCESS;
5102 goto exit_check;
5103 }
5104
5105 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5106
5107 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5108 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
5109 nt_ddb_idx->flash_isid);
5110 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
5111 /* found duplicate ddb */
5112 if (ret == QLA_SUCCESS)
5113 goto exit_check;
5114 }
5115
5116 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5117 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
5118
5119 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
5120 if (ret == QLA_SUCCESS) {
5121 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
5122 if (rval == QLA_SUCCESS)
5123 ret = QLA_ERROR;
5124 else
5125 ret = QLA_SUCCESS;
5126
5127 goto exit_check;
5128 }
5129 }
5130
5131 exit_check:
5132 if (fw_tddb)
5133 vfree(fw_tddb);
5134 if (tmp_tddb)
5135 vfree(tmp_tddb);
5136 return ret;
5137 }
5138
5139 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
5140 {
5141 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5142
5143 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5144 list_del_init(&ddb_idx->list);
5145 vfree(ddb_idx);
5146 }
5147 }
5148
5149 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
5150 struct dev_db_entry *fw_ddb_entry)
5151 {
5152 struct iscsi_endpoint *ep;
5153 struct sockaddr_in *addr;
5154 struct sockaddr_in6 *addr6;
5155 struct sockaddr *t_addr;
5156 struct sockaddr_storage *dst_addr;
5157 char *ip;
5158
5159 /* TODO: the iscsi_endpoint needs to be destroyed on unload */
5160 dst_addr = vmalloc(sizeof(*dst_addr));
5161 if (!dst_addr)
5162 return NULL;
5163
5164 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
5165 t_addr = (struct sockaddr *)dst_addr;
5166 t_addr->sa_family = AF_INET6;
5167 addr6 = (struct sockaddr_in6 *)dst_addr;
5168 ip = (char *)&addr6->sin6_addr;
5169 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
5170 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
5171
5172 } else {
5173 t_addr = (struct sockaddr *)dst_addr;
5174 t_addr->sa_family = AF_INET;
5175 addr = (struct sockaddr_in *)dst_addr;
5176 ip = (char *)&addr->sin_addr;
5177 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
5178 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
5179 }
5180
5181 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
5182 vfree(dst_addr);
5183 return ep;
5184 }
5185
5186 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
5187 {
5188 if (ql4xdisablesysfsboot)
5189 return QLA_SUCCESS;
5190 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
5191 return QLA_ERROR;
5192 return QLA_SUCCESS;
5193 }
5194
5195 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
5196 struct ddb_entry *ddb_entry,
5197 uint16_t idx)
5198 {
5199 uint16_t def_timeout;
5200
5201 ddb_entry->ddb_type = FLASH_DDB;
5202 ddb_entry->fw_ddb_index = INVALID_ENTRY;
5203 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
5204 ddb_entry->ha = ha;
5205 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
5206 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
5207
5208 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
5209 atomic_set(&ddb_entry->relogin_timer, 0);
5210 atomic_set(&ddb_entry->relogin_retry_count, 0);
5211 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
5212 ddb_entry->default_relogin_timeout =
5213 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
5214 def_timeout : LOGIN_TOV;
5215 ddb_entry->default_time2wait =
5216 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
5217
5218 if (ql4xdisablesysfsboot &&
5219 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
5220 set_bit(DF_BOOT_TGT, &ddb_entry->flags);
5221 }
5222
5223 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
5224 {
5225 uint32_t idx = 0;
5226 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
5227 uint32_t sts[MBOX_REG_COUNT];
5228 uint32_t ip_state;
5229 unsigned long wtime;
5230 int ret;
5231
5232 wtime = jiffies + (HZ * IP_CONFIG_TOV);
5233 do {
5234 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
5235 if (ip_idx[idx] == -1)
5236 continue;
5237
5238 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
5239
5240 if (ret == QLA_ERROR) {
5241 ip_idx[idx] = -1;
5242 continue;
5243 }
5244
5245 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
5246
5247 DEBUG2(ql4_printk(KERN_INFO, ha,
5248 "Waiting for IP state for idx = %d, state = 0x%x\n",
5249 ip_idx[idx], ip_state));
5250 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
5251 ip_state == IP_ADDRSTATE_INVALID ||
5252 ip_state == IP_ADDRSTATE_PREFERRED ||
5253 ip_state == IP_ADDRSTATE_DEPRICATED ||
5254 ip_state == IP_ADDRSTATE_DISABLING)
5255 ip_idx[idx] = -1;
5256 }
5257
5258 /* Break if all IP states checked */
5259 if ((ip_idx[0] == -1) &&
5260 (ip_idx[1] == -1) &&
5261 (ip_idx[2] == -1) &&
5262 (ip_idx[3] == -1))
5263 break;
5264 schedule_timeout_uninterruptible(HZ);
5265 } while (time_after(wtime, jiffies));
5266 }
5267
5268 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
5269 struct dev_db_entry *flash_ddb_entry)
5270 {
5271 uint16_t options = 0;
5272 size_t ip_len = IP_ADDR_LEN;
5273
5274 options = le16_to_cpu(fw_ddb_entry->options);
5275 if (options & DDB_OPT_IPV6_DEVICE)
5276 ip_len = IPv6_ADDR_LEN;
5277
5278 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
5279 return QLA_ERROR;
5280
5281 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
5282 sizeof(fw_ddb_entry->isid)))
5283 return QLA_ERROR;
5284
5285 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
5286 sizeof(fw_ddb_entry->port)))
5287 return QLA_ERROR;
5288
5289 return QLA_SUCCESS;
5290 }
5291
5292 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
5293 struct dev_db_entry *fw_ddb_entry,
5294 uint32_t fw_idx, uint32_t *flash_index)
5295 {
5296 struct dev_db_entry *flash_ddb_entry;
5297 dma_addr_t flash_ddb_entry_dma;
5298 uint32_t idx = 0;
5299 int max_ddbs;
5300 int ret = QLA_ERROR, status;
5301
5302 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5303 MAX_DEV_DB_ENTRIES;
5304
5305 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5306 &flash_ddb_entry_dma);
5307 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
5308 ql4_printk(KERN_ERR, ha, "Out of memory\n");
5309 goto exit_find_st_idx;
5310 }
5311
5312 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
5313 flash_ddb_entry_dma, fw_idx);
5314 if (status == QLA_SUCCESS) {
5315 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
5316 if (status == QLA_SUCCESS) {
5317 *flash_index = fw_idx;
5318 ret = QLA_SUCCESS;
5319 goto exit_find_st_idx;
5320 }
5321 }
5322
5323 for (idx = 0; idx < max_ddbs; idx++) {
5324 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
5325 flash_ddb_entry_dma, idx);
5326 if (status == QLA_ERROR)
5327 continue;
5328
5329 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
5330 if (status == QLA_SUCCESS) {
5331 *flash_index = idx;
5332 ret = QLA_SUCCESS;
5333 goto exit_find_st_idx;
5334 }
5335 }
5336
5337 if (idx == max_ddbs)
5338 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
5339 fw_idx);
5340
5341 exit_find_st_idx:
5342 if (flash_ddb_entry)
5343 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
5344 flash_ddb_entry_dma);
5345
5346 return ret;
5347 }
5348
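/* Scan the firmware DDB table and add every SendTargets (ST) entry -
 * identified by an empty iSCSI name - to list_st, recording both its
 * firmware index and the flash index it maps to.
 */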
5349 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
5350 struct list_head *list_st)
5351 {
5352 struct qla_ddb_index *st_ddb_idx;
5353 int max_ddbs;
5354 int fw_idx_size;
5355 struct dev_db_entry *fw_ddb_entry;
5356 dma_addr_t fw_ddb_dma;
5357 int ret;
5358 uint32_t idx = 0, next_idx = 0;
5359 uint32_t state = 0, conn_err = 0;
5360 uint32_t flash_index = -1;
5361 uint16_t conn_id = 0;
5362
5363 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5364 &fw_ddb_dma);
5365 if (fw_ddb_entry == NULL) {
5366 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5367 goto exit_st_list;
5368 }
5369
5370 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5371 MAX_DEV_DB_ENTRIES;
5372 fw_idx_size = sizeof(struct qla_ddb_index);
5373
5374 for (idx = 0; idx < max_ddbs; idx = next_idx) {
5375 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5376 NULL, &next_idx, &state,
5377 &conn_err, NULL, &conn_id);
5378 if (ret == QLA_ERROR)
5379 break;
5380
5381 /* Ignore DDB if invalid state (unassigned) */
5382 if (state == DDB_DS_UNASSIGNED)
5383 goto continue_next_st;
5384
5385 /* Check if ST, add to the list_st */
5386 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
5387 goto continue_next_st;
5388
5389 st_ddb_idx = vzalloc(fw_idx_size);
5390 if (!st_ddb_idx)
5391 break;
5392
5393 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
5394 &flash_index);
5395 if (ret == QLA_ERROR) {
5396 ql4_printk(KERN_ERR, ha,
5397 "No flash entry for ST at idx [%d]\n", idx);
5398 st_ddb_idx->flash_ddb_idx = idx;
5399 } else {
5400 ql4_printk(KERN_INFO, ha,
5401 "ST at idx [%d] is stored at flash [%d]\n",
5402 idx, flash_index);
5403 st_ddb_idx->flash_ddb_idx = flash_index;
5404 }
5405
5406 st_ddb_idx->fw_ddb_idx = idx;
5407
5408 list_add_tail(&st_ddb_idx->list, list_st);
5409 continue_next_st:
5410 if (next_idx == 0)
5411 break;
5412 }
5413
5414 exit_st_list:
5415 if (fw_ddb_entry)
5416 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5417 }
5418
5419 /**
5420 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
5421 * @ha: pointer to adapter structure
5422 * @list_ddb: List from which failed ddbs are to be removed
5423 *
5424 * Iterate over the list of DDBs and remove those that are either in the
5425 * no-connection-active state or the session-failed state
5426 **/
5427 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
5428 struct list_head *list_ddb)
5429 {
5430 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5431 uint32_t next_idx = 0;
5432 uint32_t state = 0, conn_err = 0;
5433 int ret;
5434
5435 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5436 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
5437 NULL, 0, NULL, &next_idx, &state,
5438 &conn_err, NULL, NULL);
5439 if (ret == QLA_ERROR)
5440 continue;
5441
5442 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5443 state == DDB_DS_SESSION_FAILED) {
5444 list_del_init(&ddb_idx->list);
5445 vfree(ddb_idx);
5446 }
5447 }
5448 }
5449
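/* Refresh the session's discovery_parent_idx from the firmware DDB:
 * use ddb_link when it is a valid DDB index, otherwise DDB_NO_LINK.
 */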
5450 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
5451 struct ddb_entry *ddb_entry,
5452 struct dev_db_entry *fw_ddb_entry)
5453 {
5454 struct iscsi_cls_session *cls_sess;
5455 struct iscsi_session *sess;
5456 uint32_t max_ddbs = 0;
5457 uint16_t ddb_link = -1;
5458
5459 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5460 MAX_DEV_DB_ENTRIES;
5461
5462 cls_sess = ddb_entry->sess;
5463 sess = cls_sess->dd_data;
5464
5465 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5466 if (ddb_link < max_ddbs)
5467 sess->discovery_parent_idx = ddb_link;
5468 else
5469 sess->discovery_parent_idx = DDB_NO_LINK;
5470 }
5471
5472 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
5473 struct dev_db_entry *fw_ddb_entry,
5474 int is_reset, uint16_t idx)
5475 {
5476 struct iscsi_cls_session *cls_sess;
5477 struct iscsi_session *sess;
5478 struct iscsi_cls_conn *cls_conn;
5479 struct iscsi_endpoint *ep;
5480 uint16_t cmds_max = 32;
5481 uint16_t conn_id = 0;
5482 uint32_t initial_cmdsn = 0;
5483 int ret = QLA_SUCCESS;
5484
5485 struct ddb_entry *ddb_entry = NULL;
5486
5487 /* Create the session object with target_id set to INVALID_ENTRY;
5488 * the real target_id gets set when we issue the login
5489 */
5490 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
5491 cmds_max, sizeof(struct ddb_entry),
5492 sizeof(struct ql4_task_data),
5493 initial_cmdsn, INVALID_ENTRY);
5494 if (!cls_sess) {
5495 ret = QLA_ERROR;
5496 goto exit_setup;
5497 }
5498
5499 /*
5500 * Session setup took a reference on this module; drop it here so
5501 * the driver can be unloaded without first destroying the session.
5502 **/
5503 module_put(qla4xxx_iscsi_transport.owner);
5504 sess = cls_sess->dd_data;
5505 ddb_entry = sess->dd_data;
5506 ddb_entry->sess = cls_sess;
5507
5508 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
5509 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
5510 sizeof(struct dev_db_entry));
5511
5512 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
5513
5514 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
5515
5516 if (!cls_conn) {
5517 ret = QLA_ERROR;
5518 goto exit_setup;
5519 }
5520
5521 ddb_entry->conn = cls_conn;
5522
5523 /* Set up the endpoint, for displaying attributes in sysfs */
5524 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
5525 if (ep) {
5526 ep->conn = cls_conn;
5527 cls_conn->ep = ep;
5528 } else {
5529 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
5530 ret = QLA_ERROR;
5531 goto exit_setup;
5532 }
5533
5534 /* Update sess/conn params */
5535 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
5536 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
5537
5538 if (is_reset == RESET_ADAPTER) {
5539 iscsi_block_session(cls_sess);
5540 /* Use the relogin path to discover new devices,
5541 * but short-circuit the logic that arms the
5542 * relogin timer - instead set the flags that
5543 * initiate the login right away.
5544 */
5545 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
5546 set_bit(DF_RELOGIN, &ddb_entry->flags);
5547 }
5548
5549 exit_setup:
5550 return ret;
5551 }
5552
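/* If the NT entry's ddb_link refers to an ST entry on list_ddb, rewrite
 * the link with that ST's flash index.
 */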
5553 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
5554 struct list_head *list_ddb,
5555 struct dev_db_entry *fw_ddb_entry)
5556 {
5557 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5558 uint16_t ddb_link;
5559
5560 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5561
5562 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5563 if (ddb_idx->fw_ddb_idx == ddb_link) {
5564 DEBUG2(ql4_printk(KERN_INFO, ha,
5565 "Updating NT parent idx from [%d] to [%d]\n",
5566 ddb_link, ddb_idx->flash_ddb_idx));
5567 fw_ddb_entry->ddb_link =
5568 cpu_to_le16(ddb_idx->flash_ddb_idx);
5569 return;
5570 }
5571 }
5572 }
5573
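/* Walk the firmware DDB table and set up sessions for normal target (NT)
 * entries. On INIT_ADAPTER new NTs are collected on list_nt after a
 * duplicate check; on RESET_ADAPTER existing sessions are refreshed and
 * missing ones are set up.
 */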
5574 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
5575 struct list_head *list_nt,
5576 struct list_head *list_st,
5577 int is_reset)
5578 {
5579 struct dev_db_entry *fw_ddb_entry;
5580 struct ddb_entry *ddb_entry = NULL;
5581 dma_addr_t fw_ddb_dma;
5582 int max_ddbs;
5583 int fw_idx_size;
5584 int ret;
5585 uint32_t idx = 0, next_idx = 0;
5586 uint32_t state = 0, conn_err = 0;
5587 uint32_t ddb_idx = -1;
5588 uint16_t conn_id = 0;
5589 uint16_t ddb_link = -1;
5590 struct qla_ddb_index *nt_ddb_idx;
5591
5592 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5593 &fw_ddb_dma);
5594 if (fw_ddb_entry == NULL) {
5595 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5596 goto exit_nt_list;
5597 }
5598 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5599 MAX_DEV_DB_ENTRIES;
5600 fw_idx_size = sizeof(struct qla_ddb_index);
5601
5602 for (idx = 0; idx < max_ddbs; idx = next_idx) {
5603 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5604 NULL, &next_idx, &state,
5605 &conn_err, NULL, &conn_id);
5606 if (ret == QLA_ERROR)
5607 break;
5608
5609 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
5610 goto continue_next_nt;
5611
5612 /* Check if NT, then add it to the list */
5613 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
5614 goto continue_next_nt;
5615
5616 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5617 if (ddb_link < max_ddbs)
5618 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
5619
5620 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
5621 state == DDB_DS_SESSION_FAILED) &&
5622 (is_reset == INIT_ADAPTER))
5623 goto continue_next_nt;
5624
5625 DEBUG2(ql4_printk(KERN_INFO, ha,
5626 "Adding DDB to session = 0x%x\n", idx));
5627
5628 if (is_reset == INIT_ADAPTER) {
5629 nt_ddb_idx = vmalloc(fw_idx_size);
5630 if (!nt_ddb_idx)
5631 break;
5632
5633 nt_ddb_idx->fw_ddb_idx = idx;
5634
5635 /* Copy the original isid, as it may get updated in
5636 * qla4xxx_update_isid(). The original isid is needed by
5637 * qla4xxx_compare_tuple_ddb() to detect a duplicate
5638 * target. */
5639 memcpy(&nt_ddb_idx->flash_isid[0],
5640 &fw_ddb_entry->isid[0],
5641 sizeof(nt_ddb_idx->flash_isid));
5642
5643 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
5644 fw_ddb_entry);
5645 if (ret == QLA_SUCCESS) {
5646 /* free nt_ddb_idx and do not add to list_nt */
5647 vfree(nt_ddb_idx);
5648 goto continue_next_nt;
5649 }
5650
5651 /* Copy updated isid */
5652 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
5653 sizeof(struct dev_db_entry));
5654
5655 list_add_tail(&nt_ddb_idx->list, list_nt);
5656 } else if (is_reset == RESET_ADAPTER) {
5657 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
5658 &ddb_idx);
5659 if (ret == QLA_SUCCESS) {
5660 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
5661 ddb_idx);
5662 if (ddb_entry != NULL)
5663 qla4xxx_update_sess_disc_idx(ha,
5664 ddb_entry,
5665 fw_ddb_entry);
5666 goto continue_next_nt;
5667 }
5668 }
5669
5670 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
5671 if (ret == QLA_ERROR)
5672 goto exit_nt_list;
5673
5674 continue_next_nt:
5675 if (next_idx == 0)
5676 break;
5677 }
5678
5679 exit_nt_list:
5680 if (fw_ddb_entry)
5681 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5682 }
5683
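/* After a SendTargets discovery, set up sessions for the newly found NT
 * entries and collect them on list_nt so the caller can clear the
 * corresponding firmware DDBs; the discovering target_id is recorded as
 * the parent ddb_link.
 */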
5684 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
5685 struct list_head *list_nt,
5686 uint16_t target_id)
5687 {
5688 struct dev_db_entry *fw_ddb_entry;
5689 dma_addr_t fw_ddb_dma;
5690 int max_ddbs;
5691 int fw_idx_size;
5692 int ret;
5693 uint32_t idx = 0, next_idx = 0;
5694 uint32_t state = 0, conn_err = 0;
5695 uint16_t conn_id = 0;
5696 struct qla_ddb_index *nt_ddb_idx;
5697
5698 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5699 &fw_ddb_dma);
5700 if (fw_ddb_entry == NULL) {
5701 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5702 goto exit_new_nt_list;
5703 }
5704 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5705 MAX_DEV_DB_ENTRIES;
5706 fw_idx_size = sizeof(struct qla_ddb_index);
5707
5708 for (idx = 0; idx < max_ddbs; idx = next_idx) {
5709 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5710 NULL, &next_idx, &state,
5711 &conn_err, NULL, &conn_id);
5712 if (ret == QLA_ERROR)
5713 break;
5714
5715 /* Check if NT, then add it to the list */
5716 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
5717 goto continue_next_new_nt;
5718
5719 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
5720 goto continue_next_new_nt;
5721
5722 DEBUG2(ql4_printk(KERN_INFO, ha,
5723 "Adding DDB to session = 0x%x\n", idx));
5724
5725 nt_ddb_idx = vmalloc(fw_idx_size);
5726 if (!nt_ddb_idx)
5727 break;
5728
5729 nt_ddb_idx->fw_ddb_idx = idx;
5730
5731 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
5732 if (ret == QLA_SUCCESS) {
5733 /* free nt_ddb_idx and do not add to list_nt */
5734 vfree(nt_ddb_idx);
5735 goto continue_next_new_nt;
5736 }
5737
5738 if (target_id < max_ddbs)
5739 fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
5740
5741 list_add_tail(&nt_ddb_idx->list, list_nt);
5742
5743 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5744 idx);
5745 if (ret == QLA_ERROR)
5746 goto exit_new_nt_list;
5747
5748 continue_next_new_nt:
5749 if (next_idx == 0)
5750 break;
5751 }
5752
5753 exit_new_nt_list:
5754 if (fw_ddb_entry)
5755 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5756 }
5757
5758 /**
5759 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
5760 * @dev: dev associated with the sysfs entry
5761 * @data: pointer to flashnode session object
5762 *
5763 * Returns:
5764 * 1: if flashnode entry is non-persistent
5765 * 0: if flashnode entry is persistent
5766 **/
5767 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5768 {
5769 struct iscsi_bus_flash_session *fnode_sess;
5770
5771 if (!iscsi_flashnode_bus_match(dev, NULL))
5772 return 0;
5773
5774 fnode_sess = iscsi_dev_to_flash_session(dev);
5775
5776 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
5777 }
5778
5779 /**
5780 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
5781 * @ha: pointer to host
5782 * @fw_ddb_entry: flash ddb data
5783 * @idx: target index
5784 * @user: if set then this call is made from userland else from kernel
5785 *
5786 * Returns:
5787 * On success: QLA_SUCCESS
5788 * On failure: QLA_ERROR
5789 *
5790 * This creates separate sysfs entries for the session and connection
5791 * attributes of the given fw ddb entry.
5792 * If this is invoked as a result of a userspace call, then the entry is
5793 * marked as non-persistent using the flash_state field.
5794 **/
5795 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
5796 struct dev_db_entry *fw_ddb_entry,
5797 uint16_t *idx, int user)
5798 {
5799 struct iscsi_bus_flash_session *fnode_sess = NULL;
5800 struct iscsi_bus_flash_conn *fnode_conn = NULL;
5801 int rc = QLA_ERROR;
5802
5803 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
5804 &qla4xxx_iscsi_transport, 0);
5805 if (!fnode_sess) {
5806 ql4_printk(KERN_ERR, ha,
5807 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
5808 __func__, *idx, ha->host_no);
5809 goto exit_tgt_create;
5810 }
5811
5812 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
5813 &qla4xxx_iscsi_transport, 0);
5814 if (!fnode_conn) {
5815 ql4_printk(KERN_ERR, ha,
5816 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
5817 __func__, *idx, ha->host_no);
5818 goto free_sess;
5819 }
5820
5821 if (user) {
5822 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
5823 } else {
5824 fnode_sess->flash_state = DEV_DB_PERSISTENT;
5825
5826 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
5827 fnode_sess->is_boot_target = 1;
5828 else
5829 fnode_sess->is_boot_target = 0;
5830 }
5831
5832 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
5833 fw_ddb_entry);
5834
5835 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5836 __func__, fnode_sess->dev.kobj.name);
5837
5838 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5839 __func__, fnode_conn->dev.kobj.name);
5840
5841 return QLA_SUCCESS;
5842
5843 free_sess:
5844 iscsi_destroy_flashnode_sess(fnode_sess);
5845
5846 exit_tgt_create:
5847 return QLA_ERROR;
5848 }
5849
5850 /**
5851 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
5852 * @shost: pointer to host
5853 * @buf: type of ddb entry (ipv4/ipv6)
5854 * @len: length of buf
5855 *
5856 * This creates a new ddb entry in flash by finding the first free index and
5857 * storing the default ddb there, and then creates a sysfs entry for it.
5858 **/
5859 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5860 int len)
5861 {
5862 struct scsi_qla_host *ha = to_qla_host(shost);
5863 struct dev_db_entry *fw_ddb_entry = NULL;
5864 dma_addr_t fw_ddb_entry_dma;
5865 struct device *dev;
5866 uint16_t idx = 0;
5867 uint16_t max_ddbs = 0;
5868 uint32_t options = 0;
5869 uint32_t rval = QLA_ERROR;
5870
5871 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
5872 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
5873 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
5874 __func__));
5875 goto exit_ddb_add;
5876 }
5877
5878 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
5879 MAX_DEV_DB_ENTRIES;
5880
5881 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5882 &fw_ddb_entry_dma, GFP_KERNEL);
5883 if (!fw_ddb_entry) {
5884 DEBUG2(ql4_printk(KERN_ERR, ha,
5885 "%s: Unable to allocate dma buffer\n",
5886 __func__));
5887 goto exit_ddb_add;
5888 }
5889
5890 dev = iscsi_find_flashnode_sess(ha->host, NULL,
5891 qla4xxx_sysfs_ddb_is_non_persistent);
5892 if (dev) {
5893 ql4_printk(KERN_ERR, ha,
5894 "%s: A non-persistent entry %s found\n",
5895 __func__, dev->kobj.name);
5896 put_device(dev);
5897 goto exit_ddb_add;
5898 }
5899
5900 /* Indices 0 and 1 are reserved for boot target entries */
5901 for (idx = 2; idx < max_ddbs; idx++) {
5902 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
5903 fw_ddb_entry_dma, idx))
5904 break;
5905 }
5906
5907 if (idx == max_ddbs)
5908 goto exit_ddb_add;
5909
5910 if (!strncasecmp("ipv6", buf, 4))
5911 options |= IPV6_DEFAULT_DDB_ENTRY;
5912
5913 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5914 if (rval == QLA_ERROR)
5915 goto exit_ddb_add;
5916
5917 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
5918
5919 exit_ddb_add:
5920 if (fw_ddb_entry)
5921 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5922 fw_ddb_entry, fw_ddb_entry_dma);
5923 if (rval == QLA_SUCCESS)
5924 return idx;
5925 else
5926 return -EIO;
5927 }
5928
5929 /**
5930 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
5931 * @fnode_sess: pointer to session attrs of flash ddb entry
5932 * @fnode_conn: pointer to connection attrs of flash ddb entry
5933 *
5934 * This writes the contents of target ddb buffer to Flash with a valid cookie
5935 * value in order to make the ddb entry persistent.
5936 **/
5937 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
5938 struct iscsi_bus_flash_conn *fnode_conn)
5939 {
5940 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5941 struct scsi_qla_host *ha = to_qla_host(shost);
5942 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
5943 struct dev_db_entry *fw_ddb_entry = NULL;
5944 dma_addr_t fw_ddb_entry_dma;
5945 uint32_t options = 0;
5946 int rval = 0;
5947
5948 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5949 &fw_ddb_entry_dma, GFP_KERNEL);
5950 if (!fw_ddb_entry) {
5951 DEBUG2(ql4_printk(KERN_ERR, ha,
5952 "%s: Unable to allocate dma buffer\n",
5953 __func__));
5954 rval = -ENOMEM;
5955 goto exit_ddb_apply;
5956 }
5957
5958 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
5959 options |= IPV6_DEFAULT_DDB_ENTRY;
5960
5961 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5962 if (rval == QLA_ERROR)
5963 goto exit_ddb_apply;
5964
5965 dev_db_start_offset += (fnode_sess->target_id *
5966 sizeof(*fw_ddb_entry));
5967
5968 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
5969 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
5970
5971 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
5972 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
5973
5974 if (rval == QLA_SUCCESS) {
5975 fnode_sess->flash_state = DEV_DB_PERSISTENT;
5976 ql4_printk(KERN_INFO, ha,
5977 "%s: flash node %u of host %lu written to flash\n",
5978 __func__, fnode_sess->target_id, ha->host_no);
5979 } else {
5980 rval = -EIO;
5981 ql4_printk(KERN_ERR, ha,
5982 "%s: Error while writing flash node %u of host %lu to flash\n",
5983 __func__, fnode_sess->target_id, ha->host_no);
5984 }
5985
5986 exit_ddb_apply:
5987 if (fw_ddb_entry)
5988 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5989 fw_ddb_entry, fw_ddb_entry_dma);
5990 return rval;
5991 }
5992
5993 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
5994 struct dev_db_entry *fw_ddb_entry,
5995 uint16_t idx)
5996 {
5997 struct dev_db_entry *ddb_entry = NULL;
5998 dma_addr_t ddb_entry_dma;
5999 unsigned long wtime;
6000 uint32_t mbx_sts = 0;
6001 uint32_t state = 0, conn_err = 0;
6002 uint16_t tmo = 0;
6003 int ret = 0;
6004
6005 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
6006 &ddb_entry_dma, GFP_KERNEL);
6007 if (!ddb_entry) {
6008 DEBUG2(ql4_printk(KERN_ERR, ha,
6009 "%s: Unable to allocate dma buffer\n",
6010 __func__));
6011 return QLA_ERROR;
6012 }
6013
6014 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
6015
6016 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
6017 if (ret != QLA_SUCCESS) {
6018 DEBUG2(ql4_printk(KERN_ERR, ha,
6019 "%s: Unable to set ddb entry for index %d\n",
6020 __func__, idx));
6021 goto exit_ddb_conn_open;
6022 }
6023
6024 qla4xxx_conn_open(ha, idx);
6025
6026 /* To ensure that sendtargets is done, wait for at least 12 secs */
6027 tmo = ((ha->def_timeout > LOGIN_TOV) &&
6028 (ha->def_timeout < LOGIN_TOV * 10) ?
6029 ha->def_timeout : LOGIN_TOV);
6030
6031 DEBUG2(ql4_printk(KERN_INFO, ha,
6032 "Default time to wait for login to ddb %d\n", tmo));
6033
6034 wtime = jiffies + (HZ * tmo);
6035 do {
6036 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
6037 NULL, &state, &conn_err, NULL,
6038 NULL);
6039 if (ret == QLA_ERROR)
6040 continue;
6041
6042 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6043 state == DDB_DS_SESSION_FAILED)
6044 break;
6045
6046 schedule_timeout_uninterruptible(HZ / 10);
6047 } while (time_after(wtime, jiffies));
6048
6049 exit_ddb_conn_open:
6050 if (ddb_entry)
6051 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
6052 ddb_entry, ddb_entry_dma);
6053 return ret;
6054 }
6055
6056 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
6057 struct dev_db_entry *fw_ddb_entry,
6058 uint16_t target_id)
6059 {
6060 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6061 struct list_head list_nt;
6062 uint16_t ddb_index;
6063 int ret = 0;
6064
6065 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
6066 ql4_printk(KERN_WARNING, ha,
6067 "%s: A discovery already in progress!\n", __func__);
6068 return QLA_ERROR;
6069 }
6070
6071 INIT_LIST_HEAD(&list_nt);
6072
6073 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
6074
6075 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
6076 if (ret == QLA_ERROR)
6077 goto exit_login_st_clr_bit;
6078
6079 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
6080 if (ret == QLA_ERROR)
6081 goto exit_login_st;
6082
6083 qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
6084
6085 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
6086 list_del_init(&ddb_idx->list);
6087 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
6088 vfree(ddb_idx);
6089 }
6090
6091 exit_login_st:
6092 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
6093 ql4_printk(KERN_ERR, ha,
6094 "Unable to clear DDB index = 0x%x\n", ddb_index);
6095 }
6096
6097 clear_bit(ddb_index, ha->ddb_idx_map);
6098
6099 exit_login_st_clr_bit:
6100 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
6101 return ret;
6102 }
6103
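/* Log in to a normal target: set up a session for the entry unless one
 * already exists for it.
 */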
6104 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
6105 struct dev_db_entry *fw_ddb_entry,
6106 uint16_t idx)
6107 {
6108 int ret = QLA_ERROR;
6109
6110 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
6111 if (ret != QLA_SUCCESS)
6112 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
6113 idx);
6114 else
6115 ret = -EPERM;
6116
6117 return ret;
6118 }
6119
6120 /**
6121 * qla4xxx_sysfs_ddb_login - Login to the specified target
6122 * @fnode_sess: pointer to session attrs of flash ddb entry
6123 * @fnode_conn: pointer to connection attrs of flash ddb entry
6124 *
6125 * This logs in to the specified target
6126 **/
6127 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
6128 struct iscsi_bus_flash_conn *fnode_conn)
6129 {
6130 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6131 struct scsi_qla_host *ha = to_qla_host(shost);
6132 struct dev_db_entry *fw_ddb_entry = NULL;
6133 dma_addr_t fw_ddb_entry_dma;
6134 uint32_t options = 0;
6135 int ret = 0;
6136
6137 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
6138 ql4_printk(KERN_ERR, ha,
6139 "%s: Target info is not persistent\n", __func__);
6140 ret = -EIO;
6141 goto exit_ddb_login;
6142 }
6143
6144 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6145 &fw_ddb_entry_dma, GFP_KERNEL);
6146 if (!fw_ddb_entry) {
6147 DEBUG2(ql4_printk(KERN_ERR, ha,
6148 "%s: Unable to allocate dma buffer\n",
6149 __func__));
6150 ret = -ENOMEM;
6151 goto exit_ddb_login;
6152 }
6153
6154 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6155 options |= IPV6_DEFAULT_DDB_ENTRY;
6156
6157 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6158 if (ret == QLA_ERROR)
6159 goto exit_ddb_login;
6160
6161 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6162 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6163
6164 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
6165 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
6166 fnode_sess->target_id);
6167 else
6168 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
6169 fnode_sess->target_id);
6170
6171 if (ret > 0)
6172 ret = -EIO;
6173
6174 exit_ddb_login:
6175 if (fw_ddb_entry)
6176 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6177 fw_ddb_entry, fw_ddb_entry_dma);
6178 return ret;
6179 }
6180
6181 /**
6182 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
6183 * @cls_sess: pointer to session to be logged out
6184 *
6185 * This performs session log out from the specified target
6186 **/
6187 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
6188 {
6189 struct iscsi_session *sess;
6190 struct ddb_entry *ddb_entry = NULL;
6191 struct scsi_qla_host *ha;
6192 struct dev_db_entry *fw_ddb_entry = NULL;
6193 dma_addr_t fw_ddb_entry_dma;
6194 unsigned long flags;
6195 unsigned long wtime;
6196 uint32_t ddb_state;
6197 int options;
6198 int ret = 0;
6199
6200 sess = cls_sess->dd_data;
6201 ddb_entry = sess->dd_data;
6202 ha = ddb_entry->ha;
6203
6204 if (ddb_entry->ddb_type != FLASH_DDB) {
6205 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
6206 __func__);
6207 ret = -ENXIO;
6208 goto exit_ddb_logout;
6209 }
6210
6211 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
6212 ql4_printk(KERN_ERR, ha,
6213 "%s: Logout from boot target entry is not permitted.\n",
6214 __func__);
6215 ret = -EPERM;
6216 goto exit_ddb_logout;
6217 }
6218
6219 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6220 &fw_ddb_entry_dma, GFP_KERNEL);
6221 if (!fw_ddb_entry) {
6222 ql4_printk(KERN_ERR, ha,
6223 "%s: Unable to allocate dma buffer\n", __func__);
6224 ret = -ENOMEM;
6225 goto exit_ddb_logout;
6226 }
6227
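/* Disable relogin for this DDB; if it was already disabled, skip the
 * relogin wait and proceed straight to the logout.
 */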
6228 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
6229 goto ddb_logout_init;
6230
6231 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6232 fw_ddb_entry, fw_ddb_entry_dma,
6233 NULL, NULL, &ddb_state, NULL,
6234 NULL, NULL);
6235 if (ret == QLA_ERROR)
6236 goto ddb_logout_init;
6237
6238 if (ddb_state == DDB_DS_SESSION_ACTIVE)
6239 goto ddb_logout_init;
6240
6241 /* Wait until the next relogin is triggered via DF_RELOGIN, then
6242 * clear DF_RELOGIN to prevent any further relogin attempts.
6243 */
6244 wtime = jiffies + (HZ * RELOGIN_TOV);
6245 do {
6246 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
6247 goto ddb_logout_init;
6248
6249 schedule_timeout_uninterruptible(HZ);
6250 } while ((time_after(wtime, jiffies)));
6251
6252 ddb_logout_init:
6253 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6254 atomic_set(&ddb_entry->relogin_timer, 0);
6255
6256 options = LOGOUT_OPTION_CLOSE_SESSION;
6257 qla4xxx_session_logout_ddb(ha, ddb_entry, options);
6258
6259 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
6260 wtime = jiffies + (HZ * LOGOUT_TOV);
6261 do {
6262 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6263 fw_ddb_entry, fw_ddb_entry_dma,
6264 NULL, NULL, &ddb_state, NULL,
6265 NULL, NULL);
6266 if (ret == QLA_ERROR)
6267 goto ddb_logout_clr_sess;
6268
6269 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
6270 (ddb_state == DDB_DS_SESSION_FAILED))
6271 goto ddb_logout_clr_sess;
6272
6273 schedule_timeout_uninterruptible(HZ);
6274 } while ((time_after(wtime, jiffies)));
6275
6276 ddb_logout_clr_sess:
6277 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
6278 /*
6279 * The driver module reference count was decremented when the
6280 * session was set up so that driver unload could stay seamless
6281 * without actually destroying the session; take the reference
6282 * back before tearing the session down.
6283 **/
6284 try_module_get(qla4xxx_iscsi_transport.owner);
6285 iscsi_destroy_endpoint(ddb_entry->conn->ep);
6286
6287 spin_lock_irqsave(&ha->hardware_lock, flags);
6288 qla4xxx_free_ddb(ha, ddb_entry);
6289 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
6290 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6291
6292 iscsi_session_teardown(ddb_entry->sess);
6293
6294 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
6295 ret = QLA_SUCCESS;
6296
6297 exit_ddb_logout:
6298 if (fw_ddb_entry)
6299 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6300 fw_ddb_entry, fw_ddb_entry_dma);
6301 return ret;
6302 }
6303
6304 /**
6305 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
6306 * @fnode_sess: pointer to session attrs of flash ddb entry
6307 * @fnode_conn: pointer to connection attrs of flash ddb entry
6308 *
6309 * This performs log out from the specified target
6310 **/
6311 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
6312 struct iscsi_bus_flash_conn *fnode_conn)
6313 {
6314 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6315 struct scsi_qla_host *ha = to_qla_host(shost);
6316 struct ql4_tuple_ddb *flash_tddb = NULL;
6317 struct ql4_tuple_ddb *tmp_tddb = NULL;
6318 struct dev_db_entry *fw_ddb_entry = NULL;
6319 struct ddb_entry *ddb_entry = NULL;
6320 dma_addr_t fw_ddb_dma;
6321 uint32_t next_idx = 0;
6322 uint32_t state = 0, conn_err = 0;
6323 uint16_t conn_id = 0;
6324 int idx, index;
6325 int status, ret = 0;
6326
6327 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6328 &fw_ddb_dma);
6329 if (fw_ddb_entry == NULL) {
6330 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
6331 ret = -ENOMEM;
6332 goto exit_ddb_logout;
6333 }
6334
6335 flash_tddb = vzalloc(sizeof(*flash_tddb));
6336 if (!flash_tddb) {
6337 ql4_printk(KERN_WARNING, ha,
6338 "%s:Memory Allocation failed.\n", __func__);
6339 ret = -ENOMEM;
6340 goto exit_ddb_logout;
6341 }
6342
6343 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6344 if (!tmp_tddb) {
6345 ql4_printk(KERN_WARNING, ha,
6346 "%s:Memory Allocation failed.\n", __func__);
6347 ret = -ENOMEM;
6348 goto exit_ddb_logout;
6349 }
6350
6351 if (!fnode_sess->targetname) {
6352 ql4_printk(KERN_ERR, ha,
6353 "%s:Cannot logout from SendTarget entry\n",
6354 __func__);
6355 ret = -EPERM;
6356 goto exit_ddb_logout;
6357 }
6358
6359 if (fnode_sess->is_boot_target) {
6360 ql4_printk(KERN_ERR, ha,
6361 "%s: Logout from boot target entry is not permitted.\n",
6362 __func__);
6363 ret = -EPERM;
6364 goto exit_ddb_logout;
6365 }
6366
6367 strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
6368 ISCSI_NAME_SIZE);
6369
6370 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6371 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
6372 else
6373 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
6374
6375 flash_tddb->tpgt = fnode_sess->tpgt;
6376 flash_tddb->port = fnode_conn->port;
6377
6378 COPY_ISID(flash_tddb->isid, fnode_sess->isid);
6379
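/* Walk the active flash sessions and log out the one whose tuple
 * (target name, IP address, port, tpgt, ISID) matches this flash node.
 */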
6380 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6381 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6382 if (ddb_entry == NULL)
6383 continue;
6384
6385 if (ddb_entry->ddb_type != FLASH_DDB)
6386 continue;
6387
6388 index = ddb_entry->sess->target_id;
6389 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
6390 fw_ddb_dma, NULL, &next_idx,
6391 &state, &conn_err, NULL,
6392 &conn_id);
6393 if (status == QLA_ERROR) {
6394 ret = -ENOMEM;
6395 break;
6396 }
6397
6398 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
6399
6400 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
6401 true);
6402 if (status == QLA_SUCCESS) {
6403 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
6404 break;
6405 }
6406 }
6407
6408 if (idx == MAX_DDB_ENTRIES)
6409 ret = -ESRCH;
6410
6411 exit_ddb_logout:
6412 if (flash_tddb)
6413 vfree(flash_tddb);
6414 if (tmp_tddb)
6415 vfree(tmp_tddb);
6416 if (fw_ddb_entry)
6417 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6418
6419 return ret;
6420 }
6421
6422 static int
6423 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6424 int param, char *buf)
6425 {
6426 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6427 struct scsi_qla_host *ha = to_qla_host(shost);
6428 struct iscsi_bus_flash_conn *fnode_conn;
6429 struct ql4_chap_table chap_tbl;
6430 struct device *dev;
6431 int parent_type;
6432 int rc = 0;
6433
6434 dev = iscsi_find_flashnode_conn(fnode_sess);
6435 if (!dev)
6436 return -EIO;
6437
6438 fnode_conn = iscsi_dev_to_flash_conn(dev);
6439
6440 switch (param) {
6441 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6442 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
6443 break;
6444 case ISCSI_FLASHNODE_PORTAL_TYPE:
6445 rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
6446 break;
6447 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6448 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
6449 break;
6450 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6451 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
6452 break;
6453 case ISCSI_FLASHNODE_ENTRY_EN:
6454 rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
6455 break;
6456 case ISCSI_FLASHNODE_HDR_DGST_EN:
6457 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
6458 break;
6459 case ISCSI_FLASHNODE_DATA_DGST_EN:
6460 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
6461 break;
6462 case ISCSI_FLASHNODE_IMM_DATA_EN:
6463 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
6464 break;
6465 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6466 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
6467 break;
6468 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6469 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
6470 break;
6471 case ISCSI_FLASHNODE_PDU_INORDER:
6472 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
6473 break;
6474 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6475 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
6476 break;
6477 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6478 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
6479 break;
6480 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6481 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
6482 break;
6483 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6484 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
6485 break;
6486 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6487 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
6488 break;
6489 case ISCSI_FLASHNODE_ERL:
6490 rc = sprintf(buf, "%u\n", fnode_sess->erl);
6491 break;
6492 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6493 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
6494 break;
6495 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6496 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
6497 break;
6498 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6499 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
6500 break;
6501 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6502 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
6503 break;
6504 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6505 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
6506 break;
6507 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6508 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
6509 break;
6510 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6511 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
6512 break;
6513 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6514 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
6515 break;
6516 case ISCSI_FLASHNODE_FIRST_BURST:
6517 rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
6518 break;
6519 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6520 rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
6521 break;
6522 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6523 rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
6524 break;
6525 case ISCSI_FLASHNODE_MAX_R2T:
6526 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
6527 break;
6528 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6529 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
6530 break;
6531 case ISCSI_FLASHNODE_ISID:
6532 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
6533 fnode_sess->isid[0], fnode_sess->isid[1],
6534 fnode_sess->isid[2], fnode_sess->isid[3],
6535 fnode_sess->isid[4], fnode_sess->isid[5]);
6536 break;
6537 case ISCSI_FLASHNODE_TSID:
6538 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
6539 break;
6540 case ISCSI_FLASHNODE_PORT:
6541 rc = sprintf(buf, "%d\n", fnode_conn->port);
6542 break;
6543 case ISCSI_FLASHNODE_MAX_BURST:
6544 rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
6545 break;
6546 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6547 rc = sprintf(buf, "%u\n",
6548 fnode_sess->default_taskmgmt_timeout);
6549 break;
6550 case ISCSI_FLASHNODE_IPADDR:
6551 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6552 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
6553 else
6554 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
6555 break;
6556 case ISCSI_FLASHNODE_ALIAS:
6557 if (fnode_sess->targetalias)
6558 rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
6559 else
6560 rc = sprintf(buf, "\n");
6561 break;
6562 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6563 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6564 rc = sprintf(buf, "%pI6\n",
6565 fnode_conn->redirect_ipaddr);
6566 else
6567 rc = sprintf(buf, "%pI4\n",
6568 fnode_conn->redirect_ipaddr);
6569 break;
6570 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6571 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
6572 break;
6573 case ISCSI_FLASHNODE_LOCAL_PORT:
6574 rc = sprintf(buf, "%u\n", fnode_conn->local_port);
6575 break;
6576 case ISCSI_FLASHNODE_IPV4_TOS:
6577 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
6578 break;
6579 case ISCSI_FLASHNODE_IPV6_TC:
6580 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6581 rc = sprintf(buf, "%u\n",
6582 fnode_conn->ipv6_traffic_class);
6583 else
6584 rc = sprintf(buf, "\n");
6585 break;
6586 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6587 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
6588 break;
6589 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6590 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6591 rc = sprintf(buf, "%pI6\n",
6592 fnode_conn->link_local_ipv6_addr);
6593 else
6594 rc = sprintf(buf, "\n");
6595 break;
6596 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6597 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
6598 break;
6599 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6600 if (fnode_sess->discovery_parent_type == DDB_ISNS)
6601 parent_type = ISCSI_DISC_PARENT_ISNS;
6602 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6603 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6604 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6605 parent_type = ISCSI_DISC_PARENT_SENDTGT;
6606 else
6607 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6608
6609 rc = sprintf(buf, "%s\n",
6610 iscsi_get_discovery_parent_name(parent_type));
6611 break;
6612 case ISCSI_FLASHNODE_NAME:
6613 if (fnode_sess->targetname)
6614 rc = sprintf(buf, "%s\n", fnode_sess->targetname);
6615 else
6616 rc = sprintf(buf, "\n");
6617 break;
6618 case ISCSI_FLASHNODE_TPGT:
6619 rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
6620 break;
6621 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6622 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
6623 break;
6624 case ISCSI_FLASHNODE_TCP_RECV_WSF:
6625 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
6626 break;
6627 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6628 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
6629 break;
6630 case ISCSI_FLASHNODE_USERNAME:
6631 if (fnode_sess->chap_auth_en) {
6632 qla4xxx_get_uni_chap_at_index(ha,
6633 chap_tbl.name,
6634 chap_tbl.secret,
6635 fnode_sess->chap_out_idx);
6636 rc = sprintf(buf, "%s\n", chap_tbl.name);
6637 } else {
6638 rc = sprintf(buf, "\n");
6639 }
6640 break;
6641 case ISCSI_FLASHNODE_PASSWORD:
6642 if (fnode_sess->chap_auth_en) {
6643 qla4xxx_get_uni_chap_at_index(ha,
6644 chap_tbl.name,
6645 chap_tbl.secret,
6646 fnode_sess->chap_out_idx);
6647 rc = sprintf(buf, "%s\n", chap_tbl.secret);
6648 } else {
6649 rc = sprintf(buf, "\n");
6650 }
6651 break;
6652 case ISCSI_FLASHNODE_STATSN:
6653 rc = sprintf(buf, "%u\n", fnode_conn->statsn);
6654 break;
6655 case ISCSI_FLASHNODE_EXP_STATSN:
6656 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
6657 break;
6658 case ISCSI_FLASHNODE_IS_BOOT_TGT:
6659 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
6660 break;
6661 default:
6662 rc = -ENOSYS;
6663 break;
6664 }
6665
6666 put_device(dev);
6667 return rc;
6668 }
6669
6670 /**
6671 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6672 * @fnode_sess: pointer to session attrs of flash ddb entry
6673 * @fnode_conn: pointer to connection attrs of flash ddb entry
6674 * @data: Parameters and their values to update
6675 * @len: length of data
6676 *
6677 * This sets the parameters of the flash ddb entry and writes them to flash
6678 **/
6679 static int
6680 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6681 struct iscsi_bus_flash_conn *fnode_conn,
6682 void *data, int len)
6683 {
6684 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6685 struct scsi_qla_host *ha = to_qla_host(shost);
6686 struct iscsi_flashnode_param_info *fnode_param;
6687 struct nlattr *attr;
6688 int rc = QLA_ERROR;
6689 uint32_t rem = len;
6690
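/* Each netlink attribute carries one parameter/value pair; apply them
 * all to the flash node and then write the updated entry back to flash.
 */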
6691 nla_for_each_attr(attr, data, len, rem) {
6692 fnode_param = nla_data(attr);
6693
6694 switch (fnode_param->param) {
6695 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6696 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6697 break;
6698 case ISCSI_FLASHNODE_PORTAL_TYPE:
6699 memcpy(fnode_sess->portal_type, fnode_param->value,
6700 strlen(fnode_sess->portal_type));
6701 break;
6702 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6703 fnode_sess->auto_snd_tgt_disable =
6704 fnode_param->value[0];
6705 break;
6706 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6707 fnode_sess->discovery_sess = fnode_param->value[0];
6708 break;
6709 case ISCSI_FLASHNODE_ENTRY_EN:
6710 fnode_sess->entry_state = fnode_param->value[0];
6711 break;
6712 case ISCSI_FLASHNODE_HDR_DGST_EN:
6713 fnode_conn->hdrdgst_en = fnode_param->value[0];
6714 break;
6715 case ISCSI_FLASHNODE_DATA_DGST_EN:
6716 fnode_conn->datadgst_en = fnode_param->value[0];
6717 break;
6718 case ISCSI_FLASHNODE_IMM_DATA_EN:
6719 fnode_sess->imm_data_en = fnode_param->value[0];
6720 break;
6721 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6722 fnode_sess->initial_r2t_en = fnode_param->value[0];
6723 break;
6724 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6725 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6726 break;
6727 case ISCSI_FLASHNODE_PDU_INORDER:
6728 fnode_sess->pdu_inorder_en = fnode_param->value[0];
6729 break;
6730 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6731 fnode_sess->chap_auth_en = fnode_param->value[0];
6732 break;
6733 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6734 fnode_conn->snack_req_en = fnode_param->value[0];
6735 break;
6736 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6737 fnode_sess->discovery_logout_en = fnode_param->value[0];
6738 break;
6739 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6740 fnode_sess->bidi_chap_en = fnode_param->value[0];
6741 break;
6742 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6743 fnode_sess->discovery_auth_optional =
6744 fnode_param->value[0];
6745 break;
6746 case ISCSI_FLASHNODE_ERL:
6747 fnode_sess->erl = fnode_param->value[0];
6748 break;
6749 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6750 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
6751 break;
6752 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6753 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
6754 break;
6755 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6756 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
6757 break;
6758 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6759 fnode_conn->tcp_timer_scale = fnode_param->value[0];
6760 break;
6761 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6762 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
6763 break;
6764 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6765 fnode_conn->fragment_disable = fnode_param->value[0];
6766 break;
6767 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6768 fnode_conn->max_recv_dlength =
6769 *(unsigned *)fnode_param->value;
6770 break;
6771 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6772 fnode_conn->max_xmit_dlength =
6773 *(unsigned *)fnode_param->value;
6774 break;
6775 case ISCSI_FLASHNODE_FIRST_BURST:
6776 fnode_sess->first_burst =
6777 *(unsigned *)fnode_param->value;
6778 break;
6779 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6780 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
6781 break;
6782 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6783 fnode_sess->time2retain =
6784 *(uint16_t *)fnode_param->value;
6785 break;
6786 case ISCSI_FLASHNODE_MAX_R2T:
6787 fnode_sess->max_r2t =
6788 *(uint16_t *)fnode_param->value;
6789 break;
6790 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6791 fnode_conn->keepalive_timeout =
6792 *(uint16_t *)fnode_param->value;
6793 break;
6794 case ISCSI_FLASHNODE_ISID:
6795 memcpy(fnode_sess->isid, fnode_param->value,
6796 sizeof(fnode_sess->isid));
6797 break;
6798 case ISCSI_FLASHNODE_TSID:
6799 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
6800 break;
6801 case ISCSI_FLASHNODE_PORT:
6802 fnode_conn->port = *(uint16_t *)fnode_param->value;
6803 break;
6804 case ISCSI_FLASHNODE_MAX_BURST:
6805 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
6806 break;
6807 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6808 fnode_sess->default_taskmgmt_timeout =
6809 *(uint16_t *)fnode_param->value;
6810 break;
6811 case ISCSI_FLASHNODE_IPADDR:
6812 memcpy(fnode_conn->ipaddress, fnode_param->value,
6813 IPv6_ADDR_LEN);
6814 break;
6815 case ISCSI_FLASHNODE_ALIAS:
6816 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
6817 (char *)fnode_param->value);
6818 break;
6819 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6820 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
6821 IPv6_ADDR_LEN);
6822 break;
6823 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6824 fnode_conn->max_segment_size =
6825 *(unsigned *)fnode_param->value;
6826 break;
6827 case ISCSI_FLASHNODE_LOCAL_PORT:
6828 fnode_conn->local_port =
6829 *(uint16_t *)fnode_param->value;
6830 break;
6831 case ISCSI_FLASHNODE_IPV4_TOS:
6832 fnode_conn->ipv4_tos = fnode_param->value[0];
6833 break;
6834 case ISCSI_FLASHNODE_IPV6_TC:
6835 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
6836 break;
6837 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6838 fnode_conn->ipv6_flow_label = fnode_param->value[0];
6839 break;
6840 case ISCSI_FLASHNODE_NAME:
6841 rc = iscsi_switch_str_param(&fnode_sess->targetname,
6842 (char *)fnode_param->value);
6843 break;
6844 case ISCSI_FLASHNODE_TPGT:
6845 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
6846 break;
6847 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6848 memcpy(fnode_conn->link_local_ipv6_addr,
6849 fnode_param->value, IPv6_ADDR_LEN);
6850 break;
6851 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6852 fnode_sess->discovery_parent_idx =
6853 *(uint16_t *)fnode_param->value;
6854 break;
6855 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6856 fnode_conn->tcp_xmit_wsf =
6857 *(uint8_t *)fnode_param->value;
6858 break;
6859 case ISCSI_FLASHNODE_TCP_RECV_WSF:
6860 fnode_conn->tcp_recv_wsf =
6861 *(uint8_t *)fnode_param->value;
6862 break;
6863 case ISCSI_FLASHNODE_STATSN:
6864 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
6865 break;
6866 case ISCSI_FLASHNODE_EXP_STATSN:
6867 fnode_conn->exp_statsn =
6868 *(uint32_t *)fnode_param->value;
6869 break;
6870 default:
6871 ql4_printk(KERN_ERR, ha,
6872 "%s: No such sysfs attribute\n", __func__);
6873 rc = -ENOSYS;
6874 goto exit_set_param;
6875 }
6876 }
6877
6878 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
6879
6880 exit_set_param:
6881 return rc;
6882 }
6883
6884 /**
6885 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
6886 * @fnode_sess: pointer to session attrs of flash ddb entry
6887 *
6888 * This invalidates the flash ddb entry at the given index
6889 **/
6890 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6891 {
6892 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6893 struct scsi_qla_host *ha = to_qla_host(shost);
6894 uint32_t dev_db_start_offset;
6895 uint32_t dev_db_end_offset;
6896 struct dev_db_entry *fw_ddb_entry = NULL;
6897 dma_addr_t fw_ddb_entry_dma;
6898 uint16_t *ddb_cookie = NULL;
6899 size_t ddb_size = 0;
6900 void *pddb = NULL;
6901 int target_id;
6902 int rc = 0;
6903
6904 if (fnode_sess->is_boot_target) {
6905 rc = -EPERM;
6906 DEBUG2(ql4_printk(KERN_ERR, ha,
6907 "%s: Deletion of boot target entry is not permitted.\n",
6908 __func__));
6909 goto exit_ddb_del;
6910 }
6911
6912 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
6913 goto sysfs_ddb_del;
6914
6915 if (is_qla40XX(ha)) {
6916 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6917 dev_db_end_offset = FLASH_OFFSET_DB_END;
6918 dev_db_start_offset += (fnode_sess->target_id *
6919 sizeof(*fw_ddb_entry));
6920 ddb_size = sizeof(*fw_ddb_entry);
6921 } else {
6922 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
6923 (ha->hw.flt_region_ddb << 2);
6924 /* flt_ddb_size is the DDB table size for both ports,
6925 * so divide it by 2 to calculate the offset of the second port's table
6926 */
6927 if (ha->port_num == 1)
6928 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
6929
6930 dev_db_end_offset = dev_db_start_offset +
6931 (ha->hw.flt_ddb_size / 2);
6932
6933 dev_db_start_offset += (fnode_sess->target_id *
6934 sizeof(*fw_ddb_entry));
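/* Only the cookie needs to be invalidated on non-40xx adapters, so
 * point the flash offset at the cookie field within the entry.
 */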
6935 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
6936
6937 ddb_size = sizeof(*ddb_cookie);
6938 }
6939
6940 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
6941 __func__, dev_db_start_offset, dev_db_end_offset));
6942
6943 if (dev_db_start_offset > dev_db_end_offset) {
6944 rc = -EIO;
6945 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
6946 __func__, fnode_sess->target_id));
6947 goto exit_ddb_del;
6948 }
6949
6950 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
6951 &fw_ddb_entry_dma, GFP_KERNEL);
6952 if (!pddb) {
6953 rc = -ENOMEM;
6954 DEBUG2(ql4_printk(KERN_ERR, ha,
6955 "%s: Unable to allocate dma buffer\n",
6956 __func__));
6957 goto exit_ddb_del;
6958 }
6959
6960 if (is_qla40XX(ha)) {
6961 fw_ddb_entry = pddb;
6962 memset(fw_ddb_entry, 0, ddb_size);
6963 ddb_cookie = &fw_ddb_entry->cookie;
6964 } else {
6965 ddb_cookie = pddb;
6966 }
6967
6968 /* invalidate the cookie */
6969 *ddb_cookie = 0xFFEE;
6970 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
6971 ddb_size, FLASH_OPT_RMW_COMMIT);
6972
6973 sysfs_ddb_del:
6974 target_id = fnode_sess->target_id;
6975 iscsi_destroy_flashnode_sess(fnode_sess);
6976 ql4_printk(KERN_INFO, ha,
6977 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
6978 __func__, target_id, ha->host_no);
6979 exit_ddb_del:
6980 if (pddb)
6981 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
6982 fw_ddb_entry_dma);
6983 return rc;
6984 }
6985
6986 /**
6987 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
6988 * @ha: pointer to adapter structure
6989 *
6990 * Export the firmware DDB for all send targets and normal targets to sysfs.
6991 **/
6992 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
6993 {
6994 struct dev_db_entry *fw_ddb_entry = NULL;
6995 dma_addr_t fw_ddb_entry_dma;
6996 uint16_t max_ddbs;
6997 uint16_t idx = 0;
6998 int ret = QLA_SUCCESS;
6999
7000 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
7001 sizeof(*fw_ddb_entry),
7002 &fw_ddb_entry_dma, GFP_KERNEL);
7003 if (!fw_ddb_entry) {
7004 DEBUG2(ql4_printk(KERN_ERR, ha,
7005 "%s: Unable to allocate dma buffer\n",
7006 __func__));
7007 return -ENOMEM;
7008 }
7009
7010 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7011 MAX_DEV_DB_ENTRIES;
7012
7013 for (idx = 0; idx < max_ddbs; idx++) {
7014 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
7015 idx))
7016 continue;
7017
7018 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
7019 if (ret) {
7020 ret = -EIO;
7021 break;
7022 }
7023 }
7024
7025 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
7026 fw_ddb_entry_dma);
7027
7028 return ret;
7029 }
7030
7031 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
7032 {
7033 iscsi_destroy_all_flashnode(ha->host);
7034 }
7035
7036 /**
7037 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
7038 * @ha: pointer to adapter structure
7039 * @is_reset: Is this init path or reset path
7040 *
7041 * Create a list of sendtargets (st) from firmware DDBs, issue the send
7042 * targets using connection open, then create the list of normal targets
7043 * (nt) from firmware DDBs. Based on the nt list, set up session and
7044 * connection objects.
7045 **/
7046 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
7047 {
7048 uint16_t tmo = 0;
7049 struct list_head list_st, list_nt;
7050 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
7051 unsigned long wtime;
7052
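/* Link is not up yet: flag that the DDB list still needs to be built
 * and defer the work until the link comes up.
 */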
7053 if (!test_bit(AF_LINK_UP, &ha->flags)) {
7054 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
7055 ha->is_reset = is_reset;
7056 return;
7057 }
7058
7059 INIT_LIST_HEAD(&list_st);
7060 INIT_LIST_HEAD(&list_nt);
7061
7062 qla4xxx_build_st_list(ha, &list_st);
7063
7064 /* Before issuing the conn open mbox, ensure all IP addresses are
7065 * configured; note that conn open fails if the IPs are not configured.
7066 */
7067 qla4xxx_wait_for_ip_configuration(ha);
7068
7069 /* Go through the STs and fire the sendtargets by issuing the conn open mbx */
7070 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
7071 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
7072 }
7073
7074 /* Wait for all the sendtargets to complete (minimum 12 sec wait) */
7075 tmo = ((ha->def_timeout > LOGIN_TOV) &&
7076 (ha->def_timeout < LOGIN_TOV * 10) ?
7077 ha->def_timeout : LOGIN_TOV);
7078
7079 DEBUG2(ql4_printk(KERN_INFO, ha,
7080 "Default time to wait for build ddb %d\n", tmo));
7081
7082 wtime = jiffies + (HZ * tmo);
7083 do {
7084 if (list_empty(&list_st))
7085 break;
7086
7087 qla4xxx_remove_failed_ddb(ha, &list_st);
7088 schedule_timeout_uninterruptible(HZ / 10);
7089 } while (time_after(wtime, jiffies));
7090
7091
7092 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
7093
7094 qla4xxx_free_ddb_list(&list_st);
7095 qla4xxx_free_ddb_list(&list_nt);
7096
7097 qla4xxx_free_ddb_index(ha);
7098 }
7099
7100 /**
7101 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
7102 * response.
7103 * @ha: pointer to adapter structure
7104 *
7105 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
7106 * set in the DDB and we wait for the login response of the boot targets
7107 * during probe.
7108 **/
7109 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
7110 {
7111 struct ddb_entry *ddb_entry;
7112 struct dev_db_entry *fw_ddb_entry = NULL;
7113 dma_addr_t fw_ddb_entry_dma;
7114 unsigned long wtime;
7115 uint32_t ddb_state;
7116 int max_ddbs, idx, ret;
7117
7118 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7119 MAX_DEV_DB_ENTRIES;
7120
7121 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7122 &fw_ddb_entry_dma, GFP_KERNEL);
7123 if (!fw_ddb_entry) {
7124 ql4_printk(KERN_ERR, ha,
7125 "%s: Unable to allocate dma buffer\n", __func__);
7126 goto exit_login_resp;
7127 }
7128
7129 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
7130
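	/* Walk every firmware DDB index; only entries flagged as boot targets
	 * (DF_BOOT_TGT) are polled for a login response
	 */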
7131 for (idx = 0; idx < max_ddbs; idx++) {
7132 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7133 if (ddb_entry == NULL)
7134 continue;
7135
7136 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7137 DEBUG2(ql4_printk(KERN_INFO, ha,
7138 "%s: DDB index [%d]\n", __func__,
7139 ddb_entry->fw_ddb_index));
7140 do {
7141 ret = qla4xxx_get_fwddb_entry(ha,
7142 ddb_entry->fw_ddb_index,
7143 fw_ddb_entry, fw_ddb_entry_dma,
7144 NULL, NULL, &ddb_state, NULL,
7145 NULL, NULL);
7146 if (ret == QLA_ERROR)
7147 goto exit_login_resp;
7148
7149 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
7150 (ddb_state == DDB_DS_SESSION_FAILED))
7151 break;
7152
7153 schedule_timeout_uninterruptible(HZ);
7154
7155 } while (time_after(wtime, jiffies));
7156
7157 if (!time_after(wtime, jiffies)) {
7158 DEBUG2(ql4_printk(KERN_INFO, ha,
7159 "%s: Login response wait timer expired\n",
7160 __func__));
7161 goto exit_login_resp;
7162 }
7163 }
7164 }
7165
7166 exit_login_resp:
7167 if (fw_ddb_entry)
7168 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7169 fw_ddb_entry, fw_ddb_entry_dma);
7170 }
7171
7172 /**
7173 * qla4xxx_probe_adapter - callback function to probe HBA
7174 * @pdev: pointer to pci_dev structure
7175 * @ent: pointer to pci_device_id structure
7176 *
7177 * This routine will probe for QLogic 4xxx iSCSI host adapters.
7178 * It returns zero if successful. It also initializes all data necessary for
7179 * the driver.
7180 **/
7181 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
7182 const struct pci_device_id *ent)
7183 {
7184 int ret = -ENODEV, status;
7185 struct Scsi_Host *host;
7186 struct scsi_qla_host *ha;
7187 uint8_t init_retry_count = 0;
7188 char buf[34];
7189 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
7190 uint32_t dev_state;
7191
7192 if (pci_enable_device(pdev))
7193 return -1;
7194
7195 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
7196 if (host == NULL) {
7197 printk(KERN_WARNING
7198 "qla4xxx: Couldn't allocate host from scsi layer!\n");
7199 goto probe_disable_device;
7200 }
7201
7202 /* Clear our data area */
7203 ha = to_qla_host(host);
7204 memset(ha, 0, sizeof(*ha));
7205
7206 /* Save the information from PCI BIOS. */
7207 ha->pdev = pdev;
7208 ha->host = host;
7209 ha->host_no = host->host_no;
7210 ha->func_num = PCI_FUNC(ha->pdev->devfn);
7211
7212 pci_enable_pcie_error_reporting(pdev);
7213
7214 /* Setup Runtime configurable options */
7215 if (is_qla8022(ha)) {
7216 ha->isp_ops = &qla4_82xx_isp_ops;
7217 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
7218 ha->qdr_sn_window = -1;
7219 ha->ddr_mn_window = -1;
7220 ha->curr_window = 255;
7221 nx_legacy_intr = &legacy_intr[ha->func_num];
7222 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
7223 ha->nx_legacy_intr.tgt_status_reg =
7224 nx_legacy_intr->tgt_status_reg;
7225 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
7226 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
7227 } else if (is_qla8032(ha) || is_qla8042(ha)) {
7228 ha->isp_ops = &qla4_83xx_isp_ops;
7229 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
7230 } else {
7231 ha->isp_ops = &qla4xxx_isp_ops;
7232 }
7233
7234 if (is_qla80XX(ha)) {
7235 rwlock_init(&ha->hw_lock);
7236 ha->pf_bit = ha->func_num << 16;
7237 /* Set EEH reset type to fundamental if required by hba */
7238 pdev->needs_freset = 1;
7239 }
7240
7241 /* Configure PCI I/O space. */
7242 ret = ha->isp_ops->iospace_config(ha);
7243 if (ret)
7244 goto probe_failed_ioconfig;
7245
7246 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
7247 pdev->device, pdev->irq, ha->reg);
7248
7249 qla4xxx_config_dma_addressing(ha);
7250
7251 /* Initialize lists and spinlocks. */
7252 INIT_LIST_HEAD(&ha->free_srb_q);
7253
7254 mutex_init(&ha->mbox_sem);
7255 mutex_init(&ha->chap_sem);
7256 init_completion(&ha->mbx_intr_comp);
7257 init_completion(&ha->disable_acb_comp);
7258
7259 spin_lock_init(&ha->hardware_lock);
7260 spin_lock_init(&ha->work_lock);
7261
7262 /* Initialize work list */
7263 INIT_LIST_HEAD(&ha->work_list);
7264
7265 /* Allocate dma buffers */
7266 if (qla4xxx_mem_alloc(ha)) {
7267 ql4_printk(KERN_WARNING, ha,
7268 "[ERROR] Failed to allocate memory for adapter\n");
7269
7270 ret = -ENOMEM;
7271 goto probe_failed;
7272 }
7273
7274 host->cmd_per_lun = 3;
7275 host->max_channel = 0;
7276 host->max_lun = MAX_LUNS - 1;
7277 host->max_id = MAX_TARGETS;
7278 host->max_cmd_len = IOCB_MAX_CDB_LEN;
7279 host->can_queue = MAX_SRBS;
7280 host->transportt = qla4xxx_scsi_transport;
7281
7282 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
7283 if (ret) {
7284 ql4_printk(KERN_WARNING, ha,
7285 "%s: scsi_init_shared_tag_map failed\n", __func__);
7286 goto probe_failed;
7287 }
7288
7289 pci_set_drvdata(pdev, ha);
7290
7291 ret = scsi_add_host(host, &pdev->dev);
7292 if (ret)
7293 goto probe_failed;
7294
7295 if (is_qla80XX(ha))
7296 qla4_8xxx_get_flash_info(ha);
7297
7298 if (is_qla8032(ha) || is_qla8042(ha)) {
7299 qla4_83xx_read_reset_template(ha);
7300 /*
7301 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
7302 * If DONTRESET_BIT0 is set, drivers should not set dev_state
7303 * to NEED_RESET. But if NEED_RESET is already set, drivers
7304 * should honor the reset.
7305 */
7306 if (ql4xdontresethba == 1)
7307 qla4_83xx_set_idc_dontreset(ha);
7308 }
7309
7310 /*
7311 * Initialize the Host adapter request/response queues and
7312 * firmware
7313 * NOTE: interrupts enabled upon successful completion
7314 */
7315 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7316
7317 /* Don't retry adapter initialization if IRQ allocation failed */
7318 if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7319 ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7320 __func__);
7321 goto skip_retry_init;
7322 }
7323
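	/* Retry adapter initialization until the adapter comes online or the
	 * retry count is exhausted; for ISP8xxx, stop early if the hardware
	 * reports a failed device state
	 */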
7324 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
7325 init_retry_count++ < MAX_INIT_RETRIES) {
7326
7327 if (is_qla80XX(ha)) {
7328 ha->isp_ops->idc_lock(ha);
7329 dev_state = qla4_8xxx_rd_direct(ha,
7330 QLA8XXX_CRB_DEV_STATE);
7331 ha->isp_ops->idc_unlock(ha);
7332 if (dev_state == QLA8XXX_DEV_FAILED) {
7333 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
7334 "initialize adapter. H/W is in failed state\n",
7335 __func__);
7336 break;
7337 }
7338 }
7339 DEBUG2(printk("scsi: %s: retrying adapter initialization "
7340 "(%d)\n", __func__, init_retry_count));
7341
7342 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
7343 continue;
7344
7345 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7346 }
7347
7348 skip_retry_init:
7349 if (!test_bit(AF_ONLINE, &ha->flags)) {
7350 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7351
7352 if ((is_qla8022(ha) && ql4xdontresethba) ||
7353 ((is_qla8032(ha) || is_qla8042(ha)) &&
7354 qla4_83xx_idc_dontreset(ha))) {
7355 /* Put the device in failed state. */
7356 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7357 ha->isp_ops->idc_lock(ha);
7358 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7359 QLA8XXX_DEV_FAILED);
7360 ha->isp_ops->idc_unlock(ha);
7361 }
7362 ret = -ENODEV;
7363 goto remove_host;
7364 }
7365
7366 /* Startup the kernel thread for this host adapter. */
7367 DEBUG2(printk("scsi: %s: Starting kernel thread for "
7368 "qla4xxx_dpc\n", __func__));
7369 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
7370 ha->dpc_thread = create_singlethread_workqueue(buf);
7371 if (!ha->dpc_thread) {
7372 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
7373 ret = -ENODEV;
7374 goto remove_host;
7375 }
7376 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
7377
7378 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
7379 ha->host_no);
7380 if (!ha->task_wq) {
7381 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
7382 ret = -ENODEV;
7383 goto remove_host;
7384 }
7385
7386 /*
7387 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
7388 * (which is called indirectly by qla4xxx_initialize_adapter),
7389 * so that irqs will be registered after crbinit but before
7390 * mbx_intr_enable.
7391 */
7392 if (is_qla40XX(ha)) {
7393 ret = qla4xxx_request_irqs(ha);
7394 if (ret) {
7395 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
7396 "interrupt %d already in use.\n", pdev->irq);
7397 goto remove_host;
7398 }
7399 }
7400
7401 pci_save_state(ha->pdev);
7402 ha->isp_ops->enable_intrs(ha);
7403
7404 /* Start timer thread. */
7405 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
7406
7407 set_bit(AF_INIT_DONE, &ha->flags);
7408
7409 qla4_8xxx_alloc_sysfs_attr(ha);
7410
7411 printk(KERN_INFO
7412 " QLogic iSCSI HBA Driver version: %s\n"
7413 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7414 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7415 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
7416 ha->fw_info.fw_patch, ha->fw_info.fw_build);
7417
7418 /* Set the driver version */
7419 if (is_qla80XX(ha))
7420 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
7421
7422 if (qla4xxx_setup_boot_info(ha))
7423 ql4_printk(KERN_ERR, ha,
7424 "%s: No iSCSI boot target configured\n", __func__);
7425
7426 if (qla4xxx_sysfs_ddb_export(ha))
7427 ql4_printk(KERN_ERR, ha,
7428 "%s: Error exporting ddb to sysfs\n", __func__);
7429
7430 /* Perform the build ddb list and login to each */
7431 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
7432 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
7433 qla4xxx_wait_login_resp_boot_tgt(ha);
7434
7435 qla4xxx_create_chap_list(ha);
7436
7437 qla4xxx_create_ifaces(ha);
7438 return 0;
7439
7440 remove_host:
7441 scsi_remove_host(ha->host);
7442
7443 probe_failed:
7444 qla4xxx_free_adapter(ha);
7445
7446 probe_failed_ioconfig:
7447 pci_disable_pcie_error_reporting(pdev);
7448 scsi_host_put(ha->host);
7449
7450 probe_disable_device:
7451 pci_disable_device(pdev);
7452
7453 return ret;
7454 }
7455
7456 /**
7457 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
7458 * @ha: pointer to adapter structure
7459 *
7460 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
7461 * so that the other port will not re-initialize while in the process of
7462 * removing the ha due to driver unload or hba hotplug.
7463 **/
7464 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
7465 {
7466 struct scsi_qla_host *other_ha = NULL;
7467 struct pci_dev *other_pdev = NULL;
7468 int fn = ISP4XXX_PCI_FN_2;
7469
7470 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
7471 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
7472 fn = ISP4XXX_PCI_FN_1;
7473
7474 other_pdev =
7475 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7476 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7477 fn));
7478
7479 /* Get other_ha if other_pdev is valid and its state is enabled */
7480 if (other_pdev) {
7481 if (atomic_read(&other_pdev->enable_cnt)) {
7482 other_ha = pci_get_drvdata(other_pdev);
7483 if (other_ha) {
7484 set_bit(AF_HA_REMOVAL, &other_ha->flags);
7485 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
7486 "Prevent %s reinit\n", __func__,
7487 dev_name(&other_ha->pdev->dev)));
7488 }
7489 }
7490 pci_dev_put(other_pdev);
7491 }
7492 }
7493
7494 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
7495 {
7496 struct ddb_entry *ddb_entry;
7497 int options;
7498 int idx;
7499
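	/* Log out and tear down every flash DDB session still known to the
	 * firmware
	 */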
7500 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7501
7502 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7503 if ((ddb_entry != NULL) &&
7504 (ddb_entry->ddb_type == FLASH_DDB)) {
7505
7506 options = LOGOUT_OPTION_CLOSE_SESSION;
7507 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
7508 == QLA_ERROR)
7509 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
7510 __func__);
7511
7512 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7513 /*
7514 * The driver module reference count was decremented at session
7515 * setup so that driver unload is seamless without actually
7516 * destroying the session; take that reference back before
7517 * tearing the session down.
7518 */
7519 try_module_get(qla4xxx_iscsi_transport.owner);
7520 iscsi_destroy_endpoint(ddb_entry->conn->ep);
7521 qla4xxx_free_ddb(ha, ddb_entry);
7522 iscsi_session_teardown(ddb_entry->sess);
7523 }
7524 }
7525 }
7526 /**
7527 * qla4xxx_remove_adapter - callback function to remove adapter.
7528 * @pdev: PCI device pointer
7529 **/
7530 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
7531 {
7532 struct scsi_qla_host *ha;
7533
7534 /*
7535 * If the PCI device is disabled then it means probe_adapter had
7536 * failed and its resources were already cleaned up on probe_adapter exit.
7537 */
7538 if (!pci_is_enabled(pdev))
7539 return;
7540
7541 ha = pci_get_drvdata(pdev);
7542
7543 if (is_qla40XX(ha))
7544 qla4xxx_prevent_other_port_reinit(ha);
7545
7546 /* destroy iface from sysfs */
7547 qla4xxx_destroy_ifaces(ha);
7548
7549 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
7550 iscsi_boot_destroy_kset(ha->boot_kset);
7551
7552 qla4xxx_destroy_fw_ddb_session(ha);
7553 qla4_8xxx_free_sysfs_attr(ha);
7554
7555 qla4xxx_sysfs_ddb_remove(ha);
7556 scsi_remove_host(ha->host);
7557
7558 qla4xxx_free_adapter(ha);
7559
7560 scsi_host_put(ha->host);
7561
7562 pci_disable_pcie_error_reporting(pdev);
7563 pci_disable_device(pdev);
7564 pci_set_drvdata(pdev, NULL);
7565 }
7566
7567 /**
7568 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
7569 * @ha: HA context
7570 *
7571 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
7572 * supported addressing method.
7573 */
7574 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
7575 {
7576 int retval;
7577
7578 /* Update our PCI device dma_mask for full 64 bit mask */
7579 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
7580 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7581 dev_dbg(&ha->pdev->dev,
7582 "Failed to set 64 bit PCI consistent mask; "
7583 "using 32 bit.\n");
7584 retval = pci_set_consistent_dma_mask(ha->pdev,
7585 DMA_BIT_MASK(32));
7586 }
7587 } else
7588 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
7589 }
7590
7591 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
7592 {
7593 struct iscsi_cls_session *cls_sess;
7594 struct iscsi_session *sess;
7595 struct ddb_entry *ddb;
7596 int queue_depth = QL4_DEF_QDEPTH;
7597
7598 cls_sess = starget_to_session(sdev->sdev_target);
7599 sess = cls_sess->dd_data;
7600 ddb = sess->dd_data;
7601
7602 sdev->hostdata = ddb;
7603 sdev->tagged_supported = 1;
7604
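	/* Override the default queue depth with ql4xmaxqdepth when the module
	 * parameter holds a sane non-zero 16-bit value
	 */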
7605 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
7606 queue_depth = ql4xmaxqdepth;
7607
7608 scsi_activate_tcq(sdev, queue_depth);
7609 return 0;
7610 }
7611
7612 static int qla4xxx_slave_configure(struct scsi_device *sdev)
7613 {
7614 sdev->tagged_supported = 1;
7615 return 0;
7616 }
7617
7618 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
7619 {
7620 scsi_deactivate_tcq(sdev, 1);
7621 }
7622
7623 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
7624 int reason)
7625 {
7626 if (!ql4xqfulltracking)
7627 return -EOPNOTSUPP;
7628
7629 return iscsi_change_queue_depth(sdev, qdepth, reason);
7630 }
7631
7632 /**
7633 * qla4xxx_del_from_active_array - returns an active srb
7634 * @ha: Pointer to host adapter structure.
7635 * @index: index into the active_array
7636 *
7637 * This routine removes and returns the srb at the specified index
7638 **/
7639 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
7640 uint32_t index)
7641 {
7642 struct srb *srb = NULL;
7643 struct scsi_cmnd *cmd = NULL;
7644
7645 cmd = scsi_host_find_tag(ha->host, index);
7646 if (!cmd)
7647 return srb;
7648
7649 srb = (struct srb *)CMD_SP(cmd);
7650 if (!srb)
7651 return srb;
7652
7653 /* update counters */
7654 if (srb->flags & SRB_DMA_VALID) {
7655 ha->iocb_cnt -= srb->iocb_cnt;
7656 if (srb->cmd)
7657 srb->cmd->host_scribble =
7658 (unsigned char *)(unsigned long) MAX_SRBS;
7659 }
7660 return srb;
7661 }
7662
7663 /**
7664 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
7665 * @ha: Pointer to host adapter structure.
7666 * @cmd: Scsi Command to wait on.
7667 *
7668 * This routine waits for the command to be returned by the Firmware
7669 * for some max time.
7670 **/
7671 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
7672 struct scsi_cmnd *cmd)
7673 {
7674 int done = 0;
7675 struct srb *rp;
7676 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
7677 int ret = SUCCESS;
7678
7679 /* Don't wait on command if PCI error is being handled
7680 * by PCI AER driver
7681 */
7682 if (unlikely(pci_channel_offline(ha->pdev)) ||
7683 (test_bit(AF_EEH_BUSY, &ha->flags))) {
7684 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
7685 ha->host_no, __func__);
7686 return ret;
7687 }
7688
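	/* Poll every 2 seconds until the midlayer srb pointer is cleared or
	 * the wait count is exhausted
	 */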
7689 do {
7690 /* Check whether the command has been returned to the OS */
7691 rp = (struct srb *) CMD_SP(cmd);
7692 if (rp == NULL) {
7693 done++;
7694 break;
7695 }
7696
7697 msleep(2000);
7698 } while (max_wait_time--);
7699
7700 return done;
7701 }
7702
7703 /**
7704 * qla4xxx_wait_for_hba_online - waits for HBA to come online
7705 * @ha: Pointer to host adapter structure
7706 **/
7707 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
7708 {
7709 unsigned long wait_online;
7710
7711 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
7712 while (time_before(jiffies, wait_online)) {
7713
7714 if (adapter_up(ha))
7715 return QLA_SUCCESS;
7716
7717 msleep(2000);
7718 }
7719
7720 return QLA_ERROR;
7721 }
7722
7723 /**
7724 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
7725 * @ha: pointer to HBA
7726 * @stgt: pointer to the SCSI target
7727 * @sdev: pointer to the SCSI device (may be NULL to wait on the whole target)
7728 *
7729 * This function waits for all outstanding commands to a lun (or target) to
7730 * complete. It returns 0 if all pending commands are returned and 1 otherwise.
7731 **/
7732 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
7733 struct scsi_target *stgt,
7734 struct scsi_device *sdev)
7735 {
7736 int cnt;
7737 int status = 0;
7738 struct scsi_cmnd *cmd;
7739
7740 /*
7741 * Waiting for all commands for the designated target or dev
7742 * in the active array
7743 */
7744 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
7745 cmd = scsi_host_find_tag(ha->host, cnt);
7746 if (cmd && stgt == scsi_target(cmd->device) &&
7747 (!sdev || sdev == cmd->device)) {
7748 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7749 status++;
7750 break;
7751 }
7752 }
7753 }
7754 return status;
7755 }
7756
7757 /**
7758 * qla4xxx_eh_abort - callback for abort task.
7759 * @cmd: Pointer to Linux's SCSI command structure
7760 *
7761 * This routine is called by the Linux OS to abort the specified
7762 * command.
7763 **/
7764 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
7765 {
7766 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7767 unsigned int id = cmd->device->id;
7768 unsigned int lun = cmd->device->lun;
7769 unsigned long flags;
7770 struct srb *srb = NULL;
7771 int ret = SUCCESS;
7772 int wait = 0;
7773
7774 ql4_printk(KERN_INFO, ha,
7775 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
7776 ha->host_no, id, lun, cmd);
7777
7778 spin_lock_irqsave(&ha->hardware_lock, flags);
7779 srb = (struct srb *) CMD_SP(cmd);
7780 if (!srb) {
7781 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7782 return SUCCESS;
7783 }
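	/* Hold a reference on the srb so it cannot be freed while the abort
	 * mailbox command is in flight
	 */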
7784 kref_get(&srb->srb_ref);
7785 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7786
7787 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
7788 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
7789 ha->host_no, id, lun));
7790 ret = FAILED;
7791 } else {
7792 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
7793 ha->host_no, id, lun));
7794 wait = 1;
7795 }
7796
7797 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
7798
7799 /* Wait for command to complete */
7800 if (wait) {
7801 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7802 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
7803 ha->host_no, id, lun));
7804 ret = FAILED;
7805 }
7806 }
7807
7808 ql4_printk(KERN_INFO, ha,
7809 "scsi%ld:%d:%d: Abort command - %s\n",
7810 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
7811
7812 return ret;
7813 }
7814
7815 /**
7816 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
7817 * @cmd: Pointer to Linux's SCSI command structure
7818 *
7819 * This routine is called by the SCSI midlayer to reset the LUN associated
7820 * with the specified command.
7821 **/
7822 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
7823 {
7824 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7825 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7826 int ret = FAILED, stat;
7827
7828 if (!ddb_entry)
7829 return ret;
7830
7831 ret = iscsi_block_scsi_eh(cmd);
7832 if (ret)
7833 return ret;
7834 ret = FAILED;
7835
7836 ql4_printk(KERN_INFO, ha,
7837 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
7838 cmd->device->channel, cmd->device->id, cmd->device->lun);
7839
7840 DEBUG2(printk(KERN_INFO
7841 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
7842 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
7843 cmd, jiffies, cmd->request->timeout / HZ,
7844 ha->dpc_flags, cmd->result, cmd->allowed));
7845
7846 /* FIXME: wait for hba to go online */
7847 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
7848 if (stat != QLA_SUCCESS) {
7849 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
7850 goto eh_dev_reset_done;
7851 }
7852
7853 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7854 cmd->device)) {
7855 ql4_printk(KERN_INFO, ha,
7856 "DEVICE RESET FAILED - waiting for "
7857 "commands.\n");
7858 goto eh_dev_reset_done;
7859 }
7860
7861 /* Send marker. */
7862 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7863 MM_LUN_RESET) != QLA_SUCCESS)
7864 goto eh_dev_reset_done;
7865
7866 ql4_printk(KERN_INFO, ha,
7867 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
7868 ha->host_no, cmd->device->channel, cmd->device->id,
7869 cmd->device->lun);
7870
7871 ret = SUCCESS;
7872
7873 eh_dev_reset_done:
7874
7875 return ret;
7876 }
7877
7878 /**
7879 * qla4xxx_eh_target_reset - callback for target reset.
7880 * @cmd: Pointer to Linux's SCSI command structure
7881 *
7882 * This routine is called by the Linux OS to reset the target.
7883 **/
7884 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
7885 {
7886 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7887 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7888 int stat, ret;
7889
7890 if (!ddb_entry)
7891 return FAILED;
7892
7893 ret = iscsi_block_scsi_eh(cmd);
7894 if (ret)
7895 return ret;
7896
7897 starget_printk(KERN_INFO, scsi_target(cmd->device),
7898 "WARM TARGET RESET ISSUED.\n");
7899
7900 DEBUG2(printk(KERN_INFO
7901 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
7902 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
7903 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
7904 ha->dpc_flags, cmd->result, cmd->allowed));
7905
7906 stat = qla4xxx_reset_target(ha, ddb_entry);
7907 if (stat != QLA_SUCCESS) {
7908 starget_printk(KERN_INFO, scsi_target(cmd->device),
7909 "WARM TARGET RESET FAILED.\n");
7910 return FAILED;
7911 }
7912
7913 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7914 NULL)) {
7915 starget_printk(KERN_INFO, scsi_target(cmd->device),
7916 "WARM TARGET DEVICE RESET FAILED - "
7917 "waiting for commands.\n");
7918 return FAILED;
7919 }
7920
7921 /* Send marker. */
7922 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7923 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
7924 starget_printk(KERN_INFO, scsi_target(cmd->device),
7925 "WARM TARGET DEVICE RESET FAILED - "
7926 "marker iocb failed.\n");
7927 return FAILED;
7928 }
7929
7930 starget_printk(KERN_INFO, scsi_target(cmd->device),
7931 "WARM TARGET RESET SUCCEEDED.\n");
7932 return SUCCESS;
7933 }
7934
7935 /**
7936 * qla4xxx_is_eh_active - check if error handler is running
7937 * @shost: Pointer to SCSI Host struct
7938 *
7939 * This routine determines whether the host reset was invoked from the EH
7940 * path or from an application such as sg_reset.
7941 **/
7942 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
7943 {
7944 if (shost->shost_state == SHOST_RECOVERY)
7945 return 1;
7946 return 0;
7947 }
7948
7949 /**
7950 * qla4xxx_eh_host_reset - kernel callback
7951 * @cmd: Pointer to Linux's SCSI command structure
7952 *
7953 * This routine is invoked by the Linux kernel to perform fatal error
7954 * recovery on the specified adapter.
7955 **/
7956 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
7957 {
7958 int return_status = FAILED;
7959 struct scsi_qla_host *ha;
7960
7961 ha = to_qla_host(cmd->device->host);
7962
7963 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
7964 qla4_83xx_set_idc_dontreset(ha);
7965
7966 /*
7967 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
7968 * protocol drivers, we should not set device_state to NEED_RESET
7969 */
7970 if (ql4xdontresethba ||
7971 ((is_qla8032(ha) || is_qla8042(ha)) &&
7972 qla4_83xx_idc_dontreset(ha))) {
7973 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
7974 ha->host_no, __func__));
7975
7976 /* Clear outstanding srb in queues */
7977 if (qla4xxx_is_eh_active(cmd->device->host))
7978 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
7979
7980 return FAILED;
7981 }
7982
7983 ql4_printk(KERN_INFO, ha,
7984 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
7985 cmd->device->channel, cmd->device->id, cmd->device->lun);
7986
7987 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
7988 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
7989 "DEAD.\n", ha->host_no, cmd->device->channel,
7990 __func__));
7991
7992 return FAILED;
7993 }
7994
7995 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7996 if (is_qla80XX(ha))
7997 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
7998 else
7999 set_bit(DPC_RESET_HA, &ha->dpc_flags);
8000 }
8001
8002 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
8003 return_status = SUCCESS;
8004
8005 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
8006 return_status == FAILED ? "FAILED" : "SUCCEEDED");
8007
8008 return return_status;
8009 }
8010
8011 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
8012 {
8013 uint32_t mbox_cmd[MBOX_REG_COUNT];
8014 uint32_t mbox_sts[MBOX_REG_COUNT];
8015 struct addr_ctrl_blk_def *acb = NULL;
8016 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
8017 int rval = QLA_SUCCESS;
8018 dma_addr_t acb_dma;
8019
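	/* Firmware context reset sequence: read the primary ACB, disable it,
	 * wait for the disable-ACB completion, then restore the saved ACB
	 */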
8020 acb = dma_alloc_coherent(&ha->pdev->dev,
8021 sizeof(struct addr_ctrl_blk_def),
8022 &acb_dma, GFP_KERNEL);
8023 if (!acb) {
8024 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
8025 __func__);
8026 rval = -ENOMEM;
8027 goto exit_port_reset;
8028 }
8029
8030 memset(acb, 0, acb_len);
8031
8032 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
8033 if (rval != QLA_SUCCESS) {
8034 rval = -EIO;
8035 goto exit_free_acb;
8036 }
8037
8038 rval = qla4xxx_disable_acb(ha);
8039 if (rval != QLA_SUCCESS) {
8040 rval = -EIO;
8041 goto exit_free_acb;
8042 }
8043
8044 wait_for_completion_timeout(&ha->disable_acb_comp,
8045 DISABLE_ACB_TOV * HZ);
8046
8047 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
8048 if (rval != QLA_SUCCESS) {
8049 rval = -EIO;
8050 goto exit_free_acb;
8051 }
8052
8053 exit_free_acb:
8054 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
8055 acb, acb_dma);
8056 exit_port_reset:
8057 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
8058 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
8059 return rval;
8060 }
8061
8062 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
8063 {
8064 struct scsi_qla_host *ha = to_qla_host(shost);
8065 int rval = QLA_SUCCESS;
8066 uint32_t idc_ctrl;
8067
8068 if (ql4xdontresethba) {
8069 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
8070 __func__));
8071 rval = -EPERM;
8072 goto exit_host_reset;
8073 }
8074
8075 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
8076 goto recover_adapter;
8077
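	/* SCSI_ADAPTER_RESET requests a full HBA reset via the DPC;
	 * SCSI_FIRMWARE_RESET restarts only the firmware context (deferred to
	 * the DPC on ISP8xxx, performed synchronously otherwise)
	 */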
8078 switch (reset_type) {
8079 case SCSI_ADAPTER_RESET:
8080 set_bit(DPC_RESET_HA, &ha->dpc_flags);
8081 break;
8082 case SCSI_FIRMWARE_RESET:
8083 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8084 if (is_qla80XX(ha))
8085 /* set firmware context reset */
8086 set_bit(DPC_RESET_HA_FW_CONTEXT,
8087 &ha->dpc_flags);
8088 else {
8089 rval = qla4xxx_context_reset(ha);
8090 goto exit_host_reset;
8091 }
8092 }
8093 break;
8094 }
8095
8096 recover_adapter:
8097 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
8098 * reset is issued by application */
8099 if ((is_qla8032(ha) || is_qla8042(ha)) &&
8100 test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8101 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
8102 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
8103 (idc_ctrl | GRACEFUL_RESET_BIT1));
8104 }
8105
8106 rval = qla4xxx_recover_adapter(ha);
8107 if (rval != QLA_SUCCESS) {
8108 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
8109 __func__));
8110 rval = -EIO;
8111 }
8112
8113 exit_host_reset:
8114 return rval;
8115 }
8116
8117 /* PCI AER driver recovers from all correctable errors w/o
8118 * driver intervention. For uncorrectable errors PCI AER
8119 * driver calls the following device driver's callbacks
8120 *
8121 * - Fatal Errors - link_reset
8122 * - Non-Fatal Errors - driver's pci_error_detected() which
8123 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
8124 *
8125 * PCI AER driver calls
8126 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
8127 * returns RECOVERED or NEED_RESET if fw_hung
8128 * NEED_RESET - driver's slot_reset()
8129 * DISCONNECT - device is dead & cannot recover
8130 * RECOVERED - driver's pci_resume()
8131 */
8132 static pci_ers_result_t
8133 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8134 {
8135 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8136
8137 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
8138 ha->host_no, __func__, state);
8139
8140 if (!is_aer_supported(ha))
8141 return PCI_ERS_RESULT_NONE;
8142
8143 switch (state) {
8144 case pci_channel_io_normal:
8145 clear_bit(AF_EEH_BUSY, &ha->flags);
8146 return PCI_ERS_RESULT_CAN_RECOVER;
8147 case pci_channel_io_frozen:
8148 set_bit(AF_EEH_BUSY, &ha->flags);
8149 qla4xxx_mailbox_premature_completion(ha);
8150 qla4xxx_free_irqs(ha);
8151 pci_disable_device(pdev);
8152 /* Return back all IOs */
8153 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
8154 return PCI_ERS_RESULT_NEED_RESET;
8155 case pci_channel_io_perm_failure:
8156 set_bit(AF_EEH_BUSY, &ha->flags);
8157 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
8158 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
8159 return PCI_ERS_RESULT_DISCONNECT;
8160 }
8161 return PCI_ERS_RESULT_NEED_RESET;
8162 }
8163
8164 /**
8165 * qla4xxx_pci_mmio_enabled() gets called if
8166 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
8167 * and read/write to the device still works.
8168 **/
8169 static pci_ers_result_t
8170 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
8171 {
8172 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8173
8174 if (!is_aer_supported(ha))
8175 return PCI_ERS_RESULT_NONE;
8176
8177 return PCI_ERS_RESULT_RECOVERED;
8178 }
8179
8180 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
8181 {
8182 uint32_t rval = QLA_ERROR;
8183 int fn;
8184 struct pci_dev *other_pdev = NULL;
8185
8186 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
8187
8188 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8189
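	/* Take the adapter offline and fail all sessions before determining
	 * which PCI function owns the reset
	 */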
8190 if (test_bit(AF_ONLINE, &ha->flags)) {
8191 clear_bit(AF_ONLINE, &ha->flags);
8192 clear_bit(AF_LINK_UP, &ha->flags);
8193 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
8194 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
8195 }
8196
8197 fn = PCI_FUNC(ha->pdev->devfn);
8198 while (fn > 0) {
8199 fn--;
8200 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
8201 "func %x\n", ha->host_no, __func__, fn);
8202 /* Get the pci device given the domain, bus,
8203 * slot/function number */
8204 other_pdev =
8205 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8206 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8207 fn));
8208
8209 if (!other_pdev)
8210 continue;
8211
8212 if (atomic_read(&other_pdev->enable_cnt)) {
8213 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
8214 "func in enabled state%x\n", ha->host_no,
8215 __func__, fn);
8216 pci_dev_put(other_pdev);
8217 break;
8218 }
8219 pci_dev_put(other_pdev);
8220 }
8221
8222 /* The first function on the card, the reset owner will
8223 * start & initialize the firmware. The other functions
8224 * on the card will reset the firmware context
8225 */
8226 if (!fn) {
8227 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
8228 "0x%x is the owner\n", ha->host_no, __func__,
8229 ha->pdev->devfn);
8230
8231 ha->isp_ops->idc_lock(ha);
8232 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8233 QLA8XXX_DEV_COLD);
8234 ha->isp_ops->idc_unlock(ha);
8235
8236 rval = qla4_8xxx_update_idc_reg(ha);
8237 if (rval == QLA_ERROR) {
8238 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
8239 ha->host_no, __func__);
8240 ha->isp_ops->idc_lock(ha);
8241 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8242 QLA8XXX_DEV_FAILED);
8243 ha->isp_ops->idc_unlock(ha);
8244 goto exit_error_recovery;
8245 }
8246
8247 clear_bit(AF_FW_RECOVERY, &ha->flags);
8248 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8249
8250 if (rval != QLA_SUCCESS) {
8251 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8252 "FAILED\n", ha->host_no, __func__);
8253 ha->isp_ops->idc_lock(ha);
8254 qla4_8xxx_clear_drv_active(ha);
8255 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8256 QLA8XXX_DEV_FAILED);
8257 ha->isp_ops->idc_unlock(ha);
8258 } else {
8259 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8260 "READY\n", ha->host_no, __func__);
8261 ha->isp_ops->idc_lock(ha);
8262 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8263 QLA8XXX_DEV_READY);
8264 /* Clear driver state register */
8265 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
8266 qla4_8xxx_set_drv_active(ha);
8267 ha->isp_ops->idc_unlock(ha);
8268 ha->isp_ops->enable_intrs(ha);
8269 }
8270 } else {
8271 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
8272 "the reset owner\n", ha->host_no, __func__,
8273 ha->pdev->devfn);
8274 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
8275 QLA8XXX_DEV_READY)) {
8276 clear_bit(AF_FW_RECOVERY, &ha->flags);
8277 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8278 if (rval == QLA_SUCCESS)
8279 ha->isp_ops->enable_intrs(ha);
8280
8281 ha->isp_ops->idc_lock(ha);
8282 qla4_8xxx_set_drv_active(ha);
8283 ha->isp_ops->idc_unlock(ha);
8284 }
8285 }
8286 exit_error_recovery:
8287 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8288 return rval;
8289 }
8290
8291 static pci_ers_result_t
8292 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
8293 {
8294 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
8295 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8296 int rc;
8297
8298 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
8299 ha->host_no, __func__);
8300
8301 if (!is_aer_supported(ha))
8302 return PCI_ERS_RESULT_NONE;
8303
8304 /* Restore the saved state of PCIe device -
8305 * BAR registers, PCI Config space, PCIX, MSI,
8306 * IOV states
8307 */
8308 pci_restore_state(pdev);
8309
8310 /* pci_restore_state() clears the saved_state flag of the device,
8311 * so save the state again to set the flag.
8312 */
8313 pci_save_state(pdev);
8314
8315 /* Initialize device or resume if in suspended state */
8316 rc = pci_enable_device(pdev);
8317 if (rc) {
8318 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
8319 "device after reset\n", ha->host_no, __func__);
8320 goto exit_slot_reset;
8321 }
8322
8323 ha->isp_ops->disable_intrs(ha);
8324
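	/* For ISP8xxx, run the multi-function error recovery procedure to
	 * bring the firmware back up
	 */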
8325 if (is_qla80XX(ha)) {
8326 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
8327 ret = PCI_ERS_RESULT_RECOVERED;
8328 goto exit_slot_reset;
8329 } else
8330 goto exit_slot_reset;
8331 }
8332
8333 exit_slot_reset:
8334 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
8335 ha->host_no, __func__, ret);
8336 return ret;
8337 }
8338
8339 static void
8340 qla4xxx_pci_resume(struct pci_dev *pdev)
8341 {
8342 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8343 int ret;
8344
8345 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
8346 ha->host_no, __func__);
8347
8348 ret = qla4xxx_wait_for_hba_online(ha);
8349 if (ret != QLA_SUCCESS) {
8350 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
8351 "resume I/O from slot/link_reset\n", ha->host_no,
8352 __func__);
8353 }
8354
8355 pci_cleanup_aer_uncorrect_error_status(pdev);
8356 clear_bit(AF_EEH_BUSY, &ha->flags);
8357 }
8358
8359 static const struct pci_error_handlers qla4xxx_err_handler = {
8360 .error_detected = qla4xxx_pci_error_detected,
8361 .mmio_enabled = qla4xxx_pci_mmio_enabled,
8362 .slot_reset = qla4xxx_pci_slot_reset,
8363 .resume = qla4xxx_pci_resume,
8364 };
8365
8366 static struct pci_device_id qla4xxx_pci_tbl[] = {
8367 {
8368 .vendor = PCI_VENDOR_ID_QLOGIC,
8369 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
8370 .subvendor = PCI_ANY_ID,
8371 .subdevice = PCI_ANY_ID,
8372 },
8373 {
8374 .vendor = PCI_VENDOR_ID_QLOGIC,
8375 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
8376 .subvendor = PCI_ANY_ID,
8377 .subdevice = PCI_ANY_ID,
8378 },
8379 {
8380 .vendor = PCI_VENDOR_ID_QLOGIC,
8381 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
8382 .subvendor = PCI_ANY_ID,
8383 .subdevice = PCI_ANY_ID,
8384 },
8385 {
8386 .vendor = PCI_VENDOR_ID_QLOGIC,
8387 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
8388 .subvendor = PCI_ANY_ID,
8389 .subdevice = PCI_ANY_ID,
8390 },
8391 {
8392 .vendor = PCI_VENDOR_ID_QLOGIC,
8393 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
8394 .subvendor = PCI_ANY_ID,
8395 .subdevice = PCI_ANY_ID,
8396 },
8397 {
8398 .vendor = PCI_VENDOR_ID_QLOGIC,
8399 .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
8400 .subvendor = PCI_ANY_ID,
8401 .subdevice = PCI_ANY_ID,
8402 },
8403 {0, 0},
8404 };
8405 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
8406
8407 static struct pci_driver qla4xxx_pci_driver = {
8408 .name = DRIVER_NAME,
8409 .id_table = qla4xxx_pci_tbl,
8410 .probe = qla4xxx_probe_adapter,
8411 .remove = qla4xxx_remove_adapter,
8412 .err_handler = &qla4xxx_err_handler,
8413 };
8414
8415 static int __init qla4xxx_module_init(void)
8416 {
8417 int ret;
8418
8419 /* Allocate cache for SRBs. */
8420 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
8421 SLAB_HWCACHE_ALIGN, NULL);
8422 if (srb_cachep == NULL) {
8423 printk(KERN_ERR
8424 "%s: Unable to allocate SRB cache..."
8425 "Failing load!\n", DRIVER_NAME);
8426 ret = -ENOMEM;
8427 goto no_srb_cache;
8428 }
8429
8430 /* Derive version string. */
8431 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
8432 if (ql4xextended_error_logging)
8433 strcat(qla4xxx_version_str, "-debug");
8434
8435 qla4xxx_scsi_transport =
8436 iscsi_register_transport(&qla4xxx_iscsi_transport);
8437 if (!qla4xxx_scsi_transport) {
8438 ret = -ENODEV;
8439 goto release_srb_cache;
8440 }
8441
8442 ret = pci_register_driver(&qla4xxx_pci_driver);
8443 if (ret)
8444 goto unregister_transport;
8445
8446 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
8447 return 0;
8448
8449 unregister_transport:
8450 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8451 release_srb_cache:
8452 kmem_cache_destroy(srb_cachep);
8453 no_srb_cache:
8454 return ret;
8455 }
8456
8457 static void __exit qla4xxx_module_exit(void)
8458 {
8459 pci_unregister_driver(&qla4xxx_pci_driver);
8460 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8461 kmem_cache_destroy(srb_cachep);
8462 }
8463
8464 module_init(qla4xxx_module_init);
8465 module_exit(qla4xxx_module_exit);
8466
8467 MODULE_AUTHOR("QLogic Corporation");
8468 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
8469 MODULE_LICENSE("GPL");
8470 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);