[SCSI] qla4xxx: Populate local CHAP credentials for flash target sessions
drivers/scsi/qla4xxx/ql4_os.c (android_kernel_motorola_exynos9610.git)
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22
23 /*
24 * Driver version
25 */
26 static char qla4xxx_version_str[40];
27
28 /*
29 * SRB allocation cache
30 */
31 static struct kmem_cache *srb_cachep;
32
33 /*
34 * Module parameter information and variables
35 */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 " Set to disable exporting boot targets to sysfs.\n"
40 "\t\t 0 - Export boot targets\n"
41 "\t\t 1 - Do not export boot targets (Default)");
42
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 " Don't reset the HBA for driver recovery.\n"
47 "\t\t 0 - It will reset HBA (Default)\n"
48 "\t\t 1 - It will NOT reset HBA");
49
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 " Option to enable extended error logging.\n"
54 "\t\t 0 - no logging (Default)\n"
55 "\t\t 2 - debug logging");
56
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 "\t\t 0 = enable INTx interrupt mechanism.\n"
62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
63 "\t\t 2 = enable MSI interrupt mechanism.");
64
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 " Maximum queue depth to report for target devices.\n"
70 "\t\t Default: 32.");
71
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 " Enable or disable dynamic tracking and adjustment of\n"
76 "\t\t scsi device queue depth.\n"
77 "\t\t 0 - Disable.\n"
78 "\t\t 1 - Enable. (Default)");
79
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec.");
85
86 int ql4xmdcapmask = 0x1F;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0x1F.\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
92
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 " Set to enable minidump.\n"
97 "\t\t 0 - disable minidump\n"
98 "\t\t 1 - enable minidump (Default)");
99
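/* Usage note for the module parameters above (illustrative values only):
 * they can be set at load time, e.g.
 *   modprobe qla4xxx ql4xmaxqdepth=64 ql4xextended_error_logging=2
 * and the ones declared with S_IWUSR can also be changed at runtime via
 *   /sys/module/qla4xxx/parameters/<name>
 */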
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102 * SCSI host template entry points
103 */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105
106 /*
107 * iSCSI template entry points
108 */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 enum iscsi_param_type param_type,
119 int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 struct sockaddr *dst_addr,
123 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 struct iscsi_cls_conn *cls_conn,
133 uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 uint32_t iface_type, uint32_t payload_size,
148 uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152
153 /*
154 * SCSI host template entry points
155 */
156 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
161 static int qla4xxx_slave_alloc(struct scsi_device *device);
162 static int qla4xxx_slave_configure(struct scsi_device *device);
163 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
164 static umode_t qla4_attr_is_visible(int param_type, int param);
165 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
166 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
167 int reason);
168
169 /*
170 * iSCSI Flash DDB sysfs entry points
171 */
172 static int
173 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
174 struct iscsi_bus_flash_conn *fnode_conn,
175 void *data, int len);
176 static int
177 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
178 int param, char *buf);
179 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
180 int len);
181 static int
182 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
183 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
184 struct iscsi_bus_flash_conn *fnode_conn);
185 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
186 struct iscsi_bus_flash_conn *fnode_conn);
187 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188
189 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
190 QLA82XX_LEGACY_INTR_CONFIG;
191
192 static struct scsi_host_template qla4xxx_driver_template = {
193 .module = THIS_MODULE,
194 .name = DRIVER_NAME,
195 .proc_name = DRIVER_NAME,
196 .queuecommand = qla4xxx_queuecommand,
197
198 .eh_abort_handler = qla4xxx_eh_abort,
199 .eh_device_reset_handler = qla4xxx_eh_device_reset,
200 .eh_target_reset_handler = qla4xxx_eh_target_reset,
201 .eh_host_reset_handler = qla4xxx_eh_host_reset,
202 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
203
204 .slave_configure = qla4xxx_slave_configure,
205 .slave_alloc = qla4xxx_slave_alloc,
206 .slave_destroy = qla4xxx_slave_destroy,
207 .change_queue_depth = qla4xxx_change_queue_depth,
208
209 .this_id = -1,
210 .cmd_per_lun = 3,
211 .use_clustering = ENABLE_CLUSTERING,
212 .sg_tablesize = SG_ALL,
213
214 .max_sectors = 0xFFFF,
215 .shost_attrs = qla4xxx_host_attrs,
216 .host_reset = qla4xxx_host_reset,
217 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
218 };
219
220 static struct iscsi_transport qla4xxx_iscsi_transport = {
221 .owner = THIS_MODULE,
222 .name = DRIVER_NAME,
223 .caps = CAP_TEXT_NEGO |
224 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
225 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
226 CAP_MULTI_R2T,
227 .attr_is_visible = qla4_attr_is_visible,
228 .create_session = qla4xxx_session_create,
229 .destroy_session = qla4xxx_session_destroy,
230 .start_conn = qla4xxx_conn_start,
231 .create_conn = qla4xxx_conn_create,
232 .bind_conn = qla4xxx_conn_bind,
233 .stop_conn = iscsi_conn_stop,
234 .destroy_conn = qla4xxx_conn_destroy,
235 .set_param = iscsi_set_param,
236 .get_conn_param = qla4xxx_conn_get_param,
237 .get_session_param = qla4xxx_session_get_param,
238 .get_ep_param = qla4xxx_get_ep_param,
239 .ep_connect = qla4xxx_ep_connect,
240 .ep_poll = qla4xxx_ep_poll,
241 .ep_disconnect = qla4xxx_ep_disconnect,
242 .get_stats = qla4xxx_conn_get_stats,
243 .send_pdu = iscsi_conn_send_pdu,
244 .xmit_task = qla4xxx_task_xmit,
245 .cleanup_task = qla4xxx_task_cleanup,
246 .alloc_pdu = qla4xxx_alloc_pdu,
247
248 .get_host_param = qla4xxx_host_get_param,
249 .set_iface_param = qla4xxx_iface_set_param,
250 .get_iface_param = qla4xxx_get_iface_param,
251 .bsg_request = qla4xxx_bsg_request,
252 .send_ping = qla4xxx_send_ping,
253 .get_chap = qla4xxx_get_chap_list,
254 .delete_chap = qla4xxx_delete_chap,
255 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
256 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
257 .new_flashnode = qla4xxx_sysfs_ddb_add,
258 .del_flashnode = qla4xxx_sysfs_ddb_delete,
259 .login_flashnode = qla4xxx_sysfs_ddb_login,
260 .logout_flashnode = qla4xxx_sysfs_ddb_logout,
261 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
262 };
263
264 static struct scsi_transport_template *qla4xxx_scsi_transport;
265
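/*
 * Issue a firmware ping IOCB to dst_addr on the given iSCSI interface.
 * For IPv6 the link-local source address is tried first and, if that
 * fails, the global address of the selected interface.  Returns 0 on
 * success, -EINVAL on a firmware error, and -ENOSYS for unsupported
 * address/interface combinations.
 */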
266 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
267 uint32_t iface_type, uint32_t payload_size,
268 uint32_t pid, struct sockaddr *dst_addr)
269 {
270 struct scsi_qla_host *ha = to_qla_host(shost);
271 struct sockaddr_in *addr;
272 struct sockaddr_in6 *addr6;
273 uint32_t options = 0;
274 uint8_t ipaddr[IPv6_ADDR_LEN];
275 int rval;
276
277 memset(ipaddr, 0, IPv6_ADDR_LEN);
278 /* IPv4 to IPv4 */
279 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
280 (dst_addr->sa_family == AF_INET)) {
281 addr = (struct sockaddr_in *)dst_addr;
282 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
283 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
284 "dest: %pI4\n", __func__,
285 &ha->ip_config.ip_address, ipaddr));
286 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
287 ipaddr);
288 if (rval)
289 rval = -EINVAL;
290 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
291 (dst_addr->sa_family == AF_INET6)) {
292 /* IPv6 to IPv6 */
293 addr6 = (struct sockaddr_in6 *)dst_addr;
294 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
295
296 options |= PING_IPV6_PROTOCOL_ENABLE;
297
298 /* Ping using LinkLocal address */
299 if ((iface_num == 0) || (iface_num == 1)) {
300 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
301 "src: %pI6 dest: %pI6\n", __func__,
302 &ha->ip_config.ipv6_link_local_addr,
303 ipaddr));
304 options |= PING_IPV6_LINKLOCAL_ADDR;
305 rval = qla4xxx_ping_iocb(ha, options, payload_size,
306 pid, ipaddr);
307 } else {
308 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
309 "not supported\n", __func__, iface_num);
310 rval = -ENOSYS;
311 goto exit_send_ping;
312 }
313
314 /*
315 * If the ping using the link-local address fails, retry using
316 * the interface's global IPv6 address.
317 */
318 if (rval != QLA_SUCCESS) {
319 options &= ~PING_IPV6_LINKLOCAL_ADDR;
320 if (iface_num == 0) {
321 options |= PING_IPV6_ADDR0;
322 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
323 "Ping src: %pI6 "
324 "dest: %pI6\n", __func__,
325 &ha->ip_config.ipv6_addr0,
326 ipaddr));
327 } else if (iface_num == 1) {
328 options |= PING_IPV6_ADDR1;
329 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
330 "Ping src: %pI6 "
331 "dest: %pI6\n", __func__,
332 &ha->ip_config.ipv6_addr1,
333 ipaddr));
334 }
335 rval = qla4xxx_ping_iocb(ha, options, payload_size,
336 pid, ipaddr);
337 if (rval)
338 rval = -EINVAL;
339 }
340 } else
341 rval = -ENOSYS;
342 exit_send_ping:
343 return rval;
344 }
345
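/*
 * Sysfs attribute visibility for the iSCSI transport: the host, session,
 * network and flash-node parameters listed below are exported read-only
 * (S_IRUGO); everything else stays hidden.
 */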
346 static umode_t qla4_attr_is_visible(int param_type, int param)
347 {
348 switch (param_type) {
349 case ISCSI_HOST_PARAM:
350 switch (param) {
351 case ISCSI_HOST_PARAM_HWADDRESS:
352 case ISCSI_HOST_PARAM_IPADDRESS:
353 case ISCSI_HOST_PARAM_INITIATOR_NAME:
354 case ISCSI_HOST_PARAM_PORT_STATE:
355 case ISCSI_HOST_PARAM_PORT_SPEED:
356 return S_IRUGO;
357 default:
358 return 0;
359 }
360 case ISCSI_PARAM:
361 switch (param) {
362 case ISCSI_PARAM_PERSISTENT_ADDRESS:
363 case ISCSI_PARAM_PERSISTENT_PORT:
364 case ISCSI_PARAM_CONN_ADDRESS:
365 case ISCSI_PARAM_CONN_PORT:
366 case ISCSI_PARAM_TARGET_NAME:
367 case ISCSI_PARAM_TPGT:
368 case ISCSI_PARAM_TARGET_ALIAS:
369 case ISCSI_PARAM_MAX_BURST:
370 case ISCSI_PARAM_MAX_R2T:
371 case ISCSI_PARAM_FIRST_BURST:
372 case ISCSI_PARAM_MAX_RECV_DLENGTH:
373 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
374 case ISCSI_PARAM_IFACE_NAME:
375 case ISCSI_PARAM_CHAP_OUT_IDX:
376 case ISCSI_PARAM_CHAP_IN_IDX:
377 case ISCSI_PARAM_USERNAME:
378 case ISCSI_PARAM_PASSWORD:
379 case ISCSI_PARAM_USERNAME_IN:
380 case ISCSI_PARAM_PASSWORD_IN:
381 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
382 case ISCSI_PARAM_DISCOVERY_SESS:
383 case ISCSI_PARAM_PORTAL_TYPE:
384 case ISCSI_PARAM_CHAP_AUTH_EN:
385 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
386 case ISCSI_PARAM_BIDI_CHAP_EN:
387 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
388 case ISCSI_PARAM_DEF_TIME2WAIT:
389 case ISCSI_PARAM_DEF_TIME2RETAIN:
390 case ISCSI_PARAM_HDRDGST_EN:
391 case ISCSI_PARAM_DATADGST_EN:
392 case ISCSI_PARAM_INITIAL_R2T_EN:
393 case ISCSI_PARAM_IMM_DATA_EN:
394 case ISCSI_PARAM_PDU_INORDER_EN:
395 case ISCSI_PARAM_DATASEQ_INORDER_EN:
396 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
397 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
398 case ISCSI_PARAM_TCP_WSF_DISABLE:
399 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
400 case ISCSI_PARAM_TCP_TIMER_SCALE:
401 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
402 case ISCSI_PARAM_TCP_XMIT_WSF:
403 case ISCSI_PARAM_TCP_RECV_WSF:
404 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
405 case ISCSI_PARAM_IPV4_TOS:
406 case ISCSI_PARAM_IPV6_TC:
407 case ISCSI_PARAM_IPV6_FLOW_LABEL:
408 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
409 case ISCSI_PARAM_KEEPALIVE_TMO:
410 case ISCSI_PARAM_LOCAL_PORT:
411 case ISCSI_PARAM_ISID:
412 case ISCSI_PARAM_TSID:
413 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
414 case ISCSI_PARAM_ERL:
415 case ISCSI_PARAM_STATSN:
416 case ISCSI_PARAM_EXP_STATSN:
417 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
418 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
419 return S_IRUGO;
420 default:
421 return 0;
422 }
423 case ISCSI_NET_PARAM:
424 switch (param) {
425 case ISCSI_NET_PARAM_IPV4_ADDR:
426 case ISCSI_NET_PARAM_IPV4_SUBNET:
427 case ISCSI_NET_PARAM_IPV4_GW:
428 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
429 case ISCSI_NET_PARAM_IFACE_ENABLE:
430 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
431 case ISCSI_NET_PARAM_IPV6_ADDR:
432 case ISCSI_NET_PARAM_IPV6_ROUTER:
433 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
434 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
435 case ISCSI_NET_PARAM_VLAN_ID:
436 case ISCSI_NET_PARAM_VLAN_PRIORITY:
437 case ISCSI_NET_PARAM_VLAN_ENABLED:
438 case ISCSI_NET_PARAM_MTU:
439 case ISCSI_NET_PARAM_PORT:
440 return S_IRUGO;
441 default:
442 return 0;
443 }
444 case ISCSI_FLASHNODE_PARAM:
445 switch (param) {
446 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
447 case ISCSI_FLASHNODE_PORTAL_TYPE:
448 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
449 case ISCSI_FLASHNODE_DISCOVERY_SESS:
450 case ISCSI_FLASHNODE_ENTRY_EN:
451 case ISCSI_FLASHNODE_HDR_DGST_EN:
452 case ISCSI_FLASHNODE_DATA_DGST_EN:
453 case ISCSI_FLASHNODE_IMM_DATA_EN:
454 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
455 case ISCSI_FLASHNODE_DATASEQ_INORDER:
456 case ISCSI_FLASHNODE_PDU_INORDER:
457 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
458 case ISCSI_FLASHNODE_SNACK_REQ_EN:
459 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
460 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
461 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
462 case ISCSI_FLASHNODE_ERL:
463 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
464 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
465 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
466 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
467 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
468 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
469 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
470 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
471 case ISCSI_FLASHNODE_FIRST_BURST:
472 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
473 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
474 case ISCSI_FLASHNODE_MAX_R2T:
475 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
476 case ISCSI_FLASHNODE_ISID:
477 case ISCSI_FLASHNODE_TSID:
478 case ISCSI_FLASHNODE_PORT:
479 case ISCSI_FLASHNODE_MAX_BURST:
480 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
481 case ISCSI_FLASHNODE_IPADDR:
482 case ISCSI_FLASHNODE_ALIAS:
483 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
484 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
485 case ISCSI_FLASHNODE_LOCAL_PORT:
486 case ISCSI_FLASHNODE_IPV4_TOS:
487 case ISCSI_FLASHNODE_IPV6_TC:
488 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
489 case ISCSI_FLASHNODE_NAME:
490 case ISCSI_FLASHNODE_TPGT:
491 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
492 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
493 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
494 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
495 case ISCSI_FLASHNODE_TCP_RECV_WSF:
496 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
497 case ISCSI_FLASHNODE_USERNAME:
498 case ISCSI_FLASHNODE_PASSWORD:
499 case ISCSI_FLASHNODE_STATSN:
500 case ISCSI_FLASHNODE_EXP_STATSN:
501 case ISCSI_FLASHNODE_IS_BOOT_TGT:
502 return S_IRUGO;
503 default:
504 return 0;
505 }
506 }
507
508 return 0;
509 }
510
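/*
 * Copy valid CHAP table entries, starting at chap_tbl_idx, from the
 * cached chap_list into buf as struct iscsi_chap_rec records.
 * num_entries is in/out: the maximum number requested on entry, the
 * number of valid entries copied on exit.
 */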
511 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
512 uint32_t *num_entries, char *buf)
513 {
514 struct scsi_qla_host *ha = to_qla_host(shost);
515 struct ql4_chap_table *chap_table;
516 struct iscsi_chap_rec *chap_rec;
517 int max_chap_entries = 0;
518 int valid_chap_entries = 0;
519 int ret = 0, i;
520
521 if (is_qla80XX(ha))
522 max_chap_entries = (ha->hw.flt_chap_size / 2) /
523 sizeof(struct ql4_chap_table);
524 else
525 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
526
527 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
528 __func__, *num_entries, chap_tbl_idx);
529
530 if (!buf) {
531 ret = -ENOMEM;
532 goto exit_get_chap_list;
533 }
534
535 chap_rec = (struct iscsi_chap_rec *) buf;
536 mutex_lock(&ha->chap_sem);
537 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
538 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
539 if (chap_table->cookie !=
540 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
541 continue;
542
543 chap_rec->chap_tbl_idx = i;
544 strncpy(chap_rec->username, chap_table->name,
545 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
546 strncpy(chap_rec->password, chap_table->secret,
547 QL4_CHAP_MAX_SECRET_LEN);
548 chap_rec->password_length = chap_table->secret_len;
549
550 if (chap_table->flags & BIT_7) /* local */
551 chap_rec->chap_type = CHAP_TYPE_OUT;
552
553 if (chap_table->flags & BIT_6) /* peer */
554 chap_rec->chap_type = CHAP_TYPE_IN;
555
556 chap_rec++;
557
558 valid_chap_entries++;
559 if (valid_chap_entries == *num_entries)
560 break;
561 else
562 continue;
563 }
564 mutex_unlock(&ha->chap_sem);
565
566 exit_get_chap_list:
567 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
568 __func__, valid_chap_entries);
569 *num_entries = valid_chap_entries;
570 return ret;
571 }
572
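/*
 * device_for_each_child() callback: returns 1 if dev is a logged-in
 * iSCSI session whose DDB references the CHAP table index passed via
 * data, 0 otherwise.
 */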
573 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
574 {
575 int ret = 0;
576 uint16_t *chap_tbl_idx = (uint16_t *) data;
577 struct iscsi_cls_session *cls_session;
578 struct iscsi_session *sess;
579 struct ddb_entry *ddb_entry;
580
581 if (!iscsi_is_session_dev(dev))
582 goto exit_is_chap_active;
583
584 cls_session = iscsi_dev_to_session(dev);
585 sess = cls_session->dd_data;
586 ddb_entry = sess->dd_data;
587
588 if (iscsi_session_chkready(cls_session))
589 goto exit_is_chap_active;
590
591 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
592 ret = 1;
593
594 exit_is_chap_active:
595 return ret;
596 }
597
598 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
599 uint16_t chap_tbl_idx)
600 {
601 int ret = 0;
602
603 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
604 __qla4xxx_is_chap_active);
605
606 return ret;
607 }
608
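/*
 * Invalidate the CHAP entry at chap_tbl_idx in flash (and in the cached
 * chap_list) provided no active session still uses it.  Returns -EBUSY
 * if the entry is in use and -EINVAL on other failures.
 */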
609 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
610 {
611 struct scsi_qla_host *ha = to_qla_host(shost);
612 struct ql4_chap_table *chap_table;
613 dma_addr_t chap_dma;
614 int max_chap_entries = 0;
615 uint32_t offset = 0;
616 uint32_t chap_size;
617 int ret = 0;
618
619 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
620 if (chap_table == NULL)
621 return -ENOMEM;
622
623 memset(chap_table, 0, sizeof(struct ql4_chap_table));
624
625 if (is_qla80XX(ha))
626 max_chap_entries = (ha->hw.flt_chap_size / 2) /
627 sizeof(struct ql4_chap_table);
628 else
629 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
630
631 if (chap_tbl_idx > max_chap_entries) {
632 ret = -EINVAL;
633 goto exit_delete_chap;
634 }
635
636 /* Check if the CHAP index is in use.
637 * If it is in use, don't delete the CHAP entry. */
638 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
639 if (ret) {
640 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
641 "delete from flash\n", chap_tbl_idx);
642 ret = -EBUSY;
643 goto exit_delete_chap;
644 }
645
646 chap_size = sizeof(struct ql4_chap_table);
647 if (is_qla40XX(ha))
648 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
649 else {
650 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
651 /* flt_chap_size is the CHAP table size for both ports,
652 * so divide it by 2 to get the offset of the second port's table.
653 */
654 if (ha->port_num == 1)
655 offset += (ha->hw.flt_chap_size / 2);
656 offset += (chap_tbl_idx * chap_size);
657 }
658
659 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
660 if (ret != QLA_SUCCESS) {
661 ret = -EINVAL;
662 goto exit_delete_chap;
663 }
664
665 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
666 __le16_to_cpu(chap_table->cookie)));
667
668 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
669 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
670 goto exit_delete_chap;
671 }
672
673 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
674
675 offset = FLASH_CHAP_OFFSET |
676 (chap_tbl_idx * sizeof(struct ql4_chap_table));
677 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
678 FLASH_OPT_RMW_COMMIT);
679 if (ret == QLA_SUCCESS && ha->chap_list) {
680 mutex_lock(&ha->chap_sem);
681 /* Update ha chap_list cache */
682 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
683 chap_table, sizeof(struct ql4_chap_table));
684 mutex_unlock(&ha->chap_sem);
685 }
686 if (ret != QLA_SUCCESS)
687 ret = -EINVAL;
688
689 exit_delete_chap:
690 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
691 return ret;
692 }
693
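/*
 * Report a network parameter of iface (address, subnet, VLAN, MTU,
 * port, ...) from the cached ip_config into buf.  Only ISCSI_NET_PARAM
 * requests are handled; anything else returns -ENOSYS.
 */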
694 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
695 enum iscsi_param_type param_type,
696 int param, char *buf)
697 {
698 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
699 struct scsi_qla_host *ha = to_qla_host(shost);
700 int len = -ENOSYS;
701
702 if (param_type != ISCSI_NET_PARAM)
703 return -ENOSYS;
704
705 switch (param) {
706 case ISCSI_NET_PARAM_IPV4_ADDR:
707 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
708 break;
709 case ISCSI_NET_PARAM_IPV4_SUBNET:
710 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
711 break;
712 case ISCSI_NET_PARAM_IPV4_GW:
713 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
714 break;
715 case ISCSI_NET_PARAM_IFACE_ENABLE:
716 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
717 len = sprintf(buf, "%s\n",
718 (ha->ip_config.ipv4_options &
719 IPOPT_IPV4_PROTOCOL_ENABLE) ?
720 "enabled" : "disabled");
721 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
722 len = sprintf(buf, "%s\n",
723 (ha->ip_config.ipv6_options &
724 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
725 "enabled" : "disabled");
726 break;
727 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
728 len = sprintf(buf, "%s\n",
729 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
730 "dhcp" : "static");
731 break;
732 case ISCSI_NET_PARAM_IPV6_ADDR:
733 if (iface->iface_num == 0)
734 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
735 if (iface->iface_num == 1)
736 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
737 break;
738 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
739 len = sprintf(buf, "%pI6\n",
740 &ha->ip_config.ipv6_link_local_addr);
741 break;
742 case ISCSI_NET_PARAM_IPV6_ROUTER:
743 len = sprintf(buf, "%pI6\n",
744 &ha->ip_config.ipv6_default_router_addr);
745 break;
746 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
747 len = sprintf(buf, "%s\n",
748 (ha->ip_config.ipv6_addl_options &
749 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
750 "nd" : "static");
751 break;
752 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
753 len = sprintf(buf, "%s\n",
754 (ha->ip_config.ipv6_addl_options &
755 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
756 "auto" : "static");
757 break;
758 case ISCSI_NET_PARAM_VLAN_ID:
759 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
760 len = sprintf(buf, "%d\n",
761 (ha->ip_config.ipv4_vlan_tag &
762 ISCSI_MAX_VLAN_ID));
763 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
764 len = sprintf(buf, "%d\n",
765 (ha->ip_config.ipv6_vlan_tag &
766 ISCSI_MAX_VLAN_ID));
767 break;
768 case ISCSI_NET_PARAM_VLAN_PRIORITY:
769 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
770 len = sprintf(buf, "%d\n",
771 ((ha->ip_config.ipv4_vlan_tag >> 13) &
772 ISCSI_MAX_VLAN_PRIORITY));
773 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
774 len = sprintf(buf, "%d\n",
775 ((ha->ip_config.ipv6_vlan_tag >> 13) &
776 ISCSI_MAX_VLAN_PRIORITY));
777 break;
778 case ISCSI_NET_PARAM_VLAN_ENABLED:
779 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
780 len = sprintf(buf, "%s\n",
781 (ha->ip_config.ipv4_options &
782 IPOPT_VLAN_TAGGING_ENABLE) ?
783 "enabled" : "disabled");
784 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
785 len = sprintf(buf, "%s\n",
786 (ha->ip_config.ipv6_options &
787 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
788 "enabled" : "disabled");
789 break;
790 case ISCSI_NET_PARAM_MTU:
791 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
792 break;
793 case ISCSI_NET_PARAM_PORT:
794 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
795 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
796 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
797 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
798 break;
799 default:
800 len = -ENOSYS;
801 }
802
803 return len;
804 }
805
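/*
 * Allocate an iscsi_endpoint with a qla_endpoint payload and record the
 * destination address and owning host.  No connection is opened here;
 * the actual login happens later via qla4xxx_conn_start().
 */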
806 static struct iscsi_endpoint *
807 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
808 int non_blocking)
809 {
810 int ret;
811 struct iscsi_endpoint *ep;
812 struct qla_endpoint *qla_ep;
813 struct scsi_qla_host *ha;
814 struct sockaddr_in *addr;
815 struct sockaddr_in6 *addr6;
816
817 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
818 if (!shost) {
819 ret = -ENXIO;
820 printk(KERN_ERR "%s: shost is NULL\n",
821 __func__);
822 return ERR_PTR(ret);
823 }
824
825 ha = iscsi_host_priv(shost);
826
827 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
828 if (!ep) {
829 ret = -ENOMEM;
830 return ERR_PTR(ret);
831 }
832
833 qla_ep = ep->dd_data;
834 memset(qla_ep, 0, sizeof(struct qla_endpoint));
835 if (dst_addr->sa_family == AF_INET) {
836 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
837 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
838 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
839 (char *)&addr->sin_addr));
840 } else if (dst_addr->sa_family == AF_INET6) {
841 memcpy(&qla_ep->dst_addr, dst_addr,
842 sizeof(struct sockaddr_in6));
843 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
844 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
845 (char *)&addr6->sin6_addr));
846 }
847
848 qla_ep->host = shost;
849
850 return ep;
851 }
852
853 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
854 {
855 struct qla_endpoint *qla_ep;
856 struct scsi_qla_host *ha;
857 int ret = 0;
858
859 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
860 qla_ep = ep->dd_data;
861 ha = to_qla_host(qla_ep->host);
862
863 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
864 ret = 1;
865
866 return ret;
867 }
868
869 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
870 {
871 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
872 iscsi_destroy_endpoint(ep);
873 }
874
875 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
876 enum iscsi_param param,
877 char *buf)
878 {
879 struct qla_endpoint *qla_ep = ep->dd_data;
880 struct sockaddr *dst_addr;
881
882 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
883
884 switch (param) {
885 case ISCSI_PARAM_CONN_PORT:
886 case ISCSI_PARAM_CONN_ADDRESS:
887 if (!qla_ep)
888 return -ENOTCONN;
889
890 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
891 if (!dst_addr)
892 return -ENOTCONN;
893
894 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
895 &qla_ep->dst_addr, param, buf);
896 default:
897 return -ENOSYS;
898 }
899 }
900
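/*
 * Fetch per-connection iSCSI statistics from the firmware into a DMA
 * buffer (qla4xxx_get_mgmt_data) and translate them into the generic
 * struct iscsi_stats expected by the transport class.
 */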
901 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
902 struct iscsi_stats *stats)
903 {
904 struct iscsi_session *sess;
905 struct iscsi_cls_session *cls_sess;
906 struct ddb_entry *ddb_entry;
907 struct scsi_qla_host *ha;
908 struct ql_iscsi_stats *ql_iscsi_stats;
909 int stats_size;
910 int ret;
911 dma_addr_t iscsi_stats_dma;
912
913 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
914
915 cls_sess = iscsi_conn_to_session(cls_conn);
916 sess = cls_sess->dd_data;
917 ddb_entry = sess->dd_data;
918 ha = ddb_entry->ha;
919
920 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
921 /* Allocate memory */
922 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
923 &iscsi_stats_dma, GFP_KERNEL);
924 if (!ql_iscsi_stats) {
925 ql4_printk(KERN_ERR, ha,
926 "Unable to allocate memory for iscsi stats\n");
927 goto exit_get_stats;
928 }
929
930 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
931 iscsi_stats_dma);
932 if (ret != QLA_SUCCESS) {
933 ql4_printk(KERN_ERR, ha,
934 "Unable to retrieve iscsi stats\n");
935 goto free_stats;
936 }
937
938 /* octets */
939 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
940 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
941 /* xmit pdus */
942 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
943 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
944 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
945 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
946 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
947 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
948 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
949 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
950 /* recv pdus */
951 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
952 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
953 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
954 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
955 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
956 stats->logoutrsp_pdus =
957 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
958 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
959 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
960 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
961
962 free_stats:
963 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
964 iscsi_stats_dma);
965 exit_get_stats:
966 return;
967 }
968
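/*
 * SCSI command timeout handler: if the session has already failed,
 * reset the block layer timer so session recovery gets a chance to run
 * instead of escalating to SCSI error handling.
 */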
969 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
970 {
971 struct iscsi_cls_session *session;
972 struct iscsi_session *sess;
973 unsigned long flags;
974 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
975
976 session = starget_to_session(scsi_target(sc->device));
977 sess = session->dd_data;
978
979 spin_lock_irqsave(&session->lock, flags);
980 if (session->state == ISCSI_SESSION_FAILED)
981 ret = BLK_EH_RESET_TIMER;
982 spin_unlock_irqrestore(&session->lock, flags);
983
984 return ret;
985 }
986
987 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
988 {
989 struct scsi_qla_host *ha = to_qla_host(shost);
990 struct iscsi_cls_host *ihost = shost->shost_data;
991 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
992
993 qla4xxx_get_firmware_state(ha);
994
995 switch (ha->addl_fw_state & 0x0F00) {
996 case FW_ADDSTATE_LINK_SPEED_10MBPS:
997 speed = ISCSI_PORT_SPEED_10MBPS;
998 break;
999 case FW_ADDSTATE_LINK_SPEED_100MBPS:
1000 speed = ISCSI_PORT_SPEED_100MBPS;
1001 break;
1002 case FW_ADDSTATE_LINK_SPEED_1GBPS:
1003 speed = ISCSI_PORT_SPEED_1GBPS;
1004 break;
1005 case FW_ADDSTATE_LINK_SPEED_10GBPS:
1006 speed = ISCSI_PORT_SPEED_10GBPS;
1007 break;
1008 }
1009 ihost->port_speed = speed;
1010 }
1011
1012 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1013 {
1014 struct scsi_qla_host *ha = to_qla_host(shost);
1015 struct iscsi_cls_host *ihost = shost->shost_data;
1016 uint32_t state = ISCSI_PORT_STATE_DOWN;
1017
1018 if (test_bit(AF_LINK_UP, &ha->flags))
1019 state = ISCSI_PORT_STATE_UP;
1020
1021 ihost->port_state = state;
1022 }
1023
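/*
 * Report host-level parameters: MAC address, IPv4 address, initiator
 * name, and the current port state/speed (refreshed from firmware).
 */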
1024 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1025 enum iscsi_host_param param, char *buf)
1026 {
1027 struct scsi_qla_host *ha = to_qla_host(shost);
1028 int len;
1029
1030 switch (param) {
1031 case ISCSI_HOST_PARAM_HWADDRESS:
1032 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1033 break;
1034 case ISCSI_HOST_PARAM_IPADDRESS:
1035 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1036 break;
1037 case ISCSI_HOST_PARAM_INITIATOR_NAME:
1038 len = sprintf(buf, "%s\n", ha->name_string);
1039 break;
1040 case ISCSI_HOST_PARAM_PORT_STATE:
1041 qla4xxx_set_port_state(shost);
1042 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1043 break;
1044 case ISCSI_HOST_PARAM_PORT_SPEED:
1045 qla4xxx_set_port_speed(shost);
1046 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1047 break;
1048 default:
1049 return -ENOSYS;
1050 }
1051
1052 return len;
1053 }
1054
1055 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1056 {
1057 if (ha->iface_ipv4)
1058 return;
1059
1060 /* IPv4 */
1061 ha->iface_ipv4 = iscsi_create_iface(ha->host,
1062 &qla4xxx_iscsi_transport,
1063 ISCSI_IFACE_TYPE_IPV4, 0, 0);
1064 if (!ha->iface_ipv4)
1065 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1066 "iface0.\n");
1067 }
1068
1069 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1070 {
1071 if (!ha->iface_ipv6_0)
1072 /* IPv6 iface-0 */
1073 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1074 &qla4xxx_iscsi_transport,
1075 ISCSI_IFACE_TYPE_IPV6, 0,
1076 0);
1077 if (!ha->iface_ipv6_0)
1078 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1079 "iface0.\n");
1080
1081 if (!ha->iface_ipv6_1)
1082 /* IPv6 iface-1 */
1083 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1084 &qla4xxx_iscsi_transport,
1085 ISCSI_IFACE_TYPE_IPV6, 1,
1086 0);
1087 if (!ha->iface_ipv6_1)
1088 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1089 "iface1.\n");
1090 }
1091
1092 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1093 {
1094 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1095 qla4xxx_create_ipv4_iface(ha);
1096
1097 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1098 qla4xxx_create_ipv6_iface(ha);
1099 }
1100
1101 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1102 {
1103 if (ha->iface_ipv4) {
1104 iscsi_destroy_iface(ha->iface_ipv4);
1105 ha->iface_ipv4 = NULL;
1106 }
1107 }
1108
1109 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1110 {
1111 if (ha->iface_ipv6_0) {
1112 iscsi_destroy_iface(ha->iface_ipv6_0);
1113 ha->iface_ipv6_0 = NULL;
1114 }
1115 if (ha->iface_ipv6_1) {
1116 iscsi_destroy_iface(ha->iface_ipv6_1);
1117 ha->iface_ipv6_1 = NULL;
1118 }
1119 }
1120
1121 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1122 {
1123 qla4xxx_destroy_ipv4_iface(ha);
1124 qla4xxx_destroy_ipv6_iface(ha);
1125 }
1126
1127 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1128 struct iscsi_iface_param_info *iface_param,
1129 struct addr_ctrl_blk *init_fw_cb)
1130 {
1131 /*
1132 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1133 * iface_num 1 is valid only for IPv6 Addr.
1134 */
1135 switch (iface_param->param) {
1136 case ISCSI_NET_PARAM_IPV6_ADDR:
1137 if (iface_param->iface_num & 0x1)
1138 /* IPv6 Addr 1 */
1139 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1140 sizeof(init_fw_cb->ipv6_addr1));
1141 else
1142 /* IPv6 Addr 0 */
1143 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1144 sizeof(init_fw_cb->ipv6_addr0));
1145 break;
1146 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1147 if (iface_param->iface_num & 0x1)
1148 break;
1149 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1150 sizeof(init_fw_cb->ipv6_if_id));
1151 break;
1152 case ISCSI_NET_PARAM_IPV6_ROUTER:
1153 if (iface_param->iface_num & 0x1)
1154 break;
1155 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1156 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1157 break;
1158 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1159 /* Autocfg applies to even interface */
1160 if (iface_param->iface_num & 0x1)
1161 break;
1162
1163 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1164 init_fw_cb->ipv6_addtl_opts &=
1165 cpu_to_le16(
1166 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1167 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1168 init_fw_cb->ipv6_addtl_opts |=
1169 cpu_to_le16(
1170 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1171 else
1172 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1173 "IPv6 addr\n");
1174 break;
1175 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1176 /* Autocfg applies to even interface */
1177 if (iface_param->iface_num & 0x1)
1178 break;
1179
1180 if (iface_param->value[0] ==
1181 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1182 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1183 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1184 else if (iface_param->value[0] ==
1185 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1186 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1187 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1188 else
1189 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1190 "IPv6 linklocal addr\n");
1191 break;
1192 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1193 /* Autocfg applies to even interface */
1194 if (iface_param->iface_num & 0x1)
1195 break;
1196
1197 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1198 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1199 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1200 break;
1201 case ISCSI_NET_PARAM_IFACE_ENABLE:
1202 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1203 init_fw_cb->ipv6_opts |=
1204 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1205 qla4xxx_create_ipv6_iface(ha);
1206 } else {
1207 init_fw_cb->ipv6_opts &=
1208 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1209 0xFFFF);
1210 qla4xxx_destroy_ipv6_iface(ha);
1211 }
1212 break;
1213 case ISCSI_NET_PARAM_VLAN_TAG:
1214 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1215 break;
1216 init_fw_cb->ipv6_vlan_tag =
1217 cpu_to_be16(*(uint16_t *)iface_param->value);
1218 break;
1219 case ISCSI_NET_PARAM_VLAN_ENABLED:
1220 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1221 init_fw_cb->ipv6_opts |=
1222 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1223 else
1224 init_fw_cb->ipv6_opts &=
1225 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1226 break;
1227 case ISCSI_NET_PARAM_MTU:
1228 init_fw_cb->eth_mtu_size =
1229 cpu_to_le16(*(uint16_t *)iface_param->value);
1230 break;
1231 case ISCSI_NET_PARAM_PORT:
1232 /* The iSCSI port applies only to the even (0th) interface */
1233 if (iface_param->iface_num & 0x1)
1234 break;
1235
1236 init_fw_cb->ipv6_port =
1237 cpu_to_le16(*(uint16_t *)iface_param->value);
1238 break;
1239 default:
1240 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1241 iface_param->param);
1242 break;
1243 }
1244 }
1245
1246 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1247 struct iscsi_iface_param_info *iface_param,
1248 struct addr_ctrl_blk *init_fw_cb)
1249 {
1250 switch (iface_param->param) {
1251 case ISCSI_NET_PARAM_IPV4_ADDR:
1252 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1253 sizeof(init_fw_cb->ipv4_addr));
1254 break;
1255 case ISCSI_NET_PARAM_IPV4_SUBNET:
1256 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
1257 sizeof(init_fw_cb->ipv4_subnet));
1258 break;
1259 case ISCSI_NET_PARAM_IPV4_GW:
1260 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1261 sizeof(init_fw_cb->ipv4_gw_addr));
1262 break;
1263 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1264 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1265 init_fw_cb->ipv4_tcp_opts |=
1266 cpu_to_le16(TCPOPT_DHCP_ENABLE);
1267 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1268 init_fw_cb->ipv4_tcp_opts &=
1269 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1270 else
1271 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1272 break;
1273 case ISCSI_NET_PARAM_IFACE_ENABLE:
1274 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1275 init_fw_cb->ipv4_ip_opts |=
1276 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1277 qla4xxx_create_ipv4_iface(ha);
1278 } else {
1279 init_fw_cb->ipv4_ip_opts &=
1280 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1281 0xFFFF);
1282 qla4xxx_destroy_ipv4_iface(ha);
1283 }
1284 break;
1285 case ISCSI_NET_PARAM_VLAN_TAG:
1286 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1287 break;
1288 init_fw_cb->ipv4_vlan_tag =
1289 cpu_to_be16(*(uint16_t *)iface_param->value);
1290 break;
1291 case ISCSI_NET_PARAM_VLAN_ENABLED:
1292 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1293 init_fw_cb->ipv4_ip_opts |=
1294 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1295 else
1296 init_fw_cb->ipv4_ip_opts &=
1297 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1298 break;
1299 case ISCSI_NET_PARAM_MTU:
1300 init_fw_cb->eth_mtu_size =
1301 cpu_to_le16(*(uint16_t *)iface_param->value);
1302 break;
1303 case ISCSI_NET_PARAM_PORT:
1304 init_fw_cb->ipv4_port =
1305 cpu_to_le16(*(uint16_t *)iface_param->value);
1306 break;
1307 default:
1308 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1309 iface_param->param);
1310 break;
1311 }
1312 }
1313
1314 static void
1315 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1316 {
1317 struct addr_ctrl_blk_def *acb;
1318 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1319 memset(acb->reserved1, 0, sizeof(acb->reserved1));
1320 memset(acb->reserved2, 0, sizeof(acb->reserved2));
1321 memset(acb->reserved3, 0, sizeof(acb->reserved3));
1322 memset(acb->reserved4, 0, sizeof(acb->reserved4));
1323 memset(acb->reserved5, 0, sizeof(acb->reserved5));
1324 memset(acb->reserved6, 0, sizeof(acb->reserved6));
1325 memset(acb->reserved7, 0, sizeof(acb->reserved7));
1326 memset(acb->reserved8, 0, sizeof(acb->reserved8));
1327 memset(acb->reserved9, 0, sizeof(acb->reserved9));
1328 memset(acb->reserved10, 0, sizeof(acb->reserved10));
1329 memset(acb->reserved11, 0, sizeof(acb->reserved11));
1330 memset(acb->reserved12, 0, sizeof(acb->reserved12));
1331 memset(acb->reserved13, 0, sizeof(acb->reserved13));
1332 memset(acb->reserved14, 0, sizeof(acb->reserved14));
1333 memset(acb->reserved15, 0, sizeof(acb->reserved15));
1334 }
1335
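/*
 * Apply a netlink-packed list of interface parameters: read the current
 * address control block from the firmware, patch the requested IPv4/IPv6
 * fields, commit the block to flash, then disable and re-enable the ACB
 * and refresh the driver's local copy of the IFCB.
 */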
1336 static int
1337 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1338 {
1339 struct scsi_qla_host *ha = to_qla_host(shost);
1340 int rval = 0;
1341 struct iscsi_iface_param_info *iface_param = NULL;
1342 struct addr_ctrl_blk *init_fw_cb = NULL;
1343 dma_addr_t init_fw_cb_dma;
1344 uint32_t mbox_cmd[MBOX_REG_COUNT];
1345 uint32_t mbox_sts[MBOX_REG_COUNT];
1346 uint32_t rem = len;
1347 struct nlattr *attr;
1348
1349 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1350 sizeof(struct addr_ctrl_blk),
1351 &init_fw_cb_dma, GFP_KERNEL);
1352 if (!init_fw_cb) {
1353 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1354 __func__);
1355 return -ENOMEM;
1356 }
1357
1358 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1359 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1360 memset(&mbox_sts, 0, sizeof(mbox_sts));
1361
1362 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1363 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1364 rval = -EIO;
1365 goto exit_init_fw_cb;
1366 }
1367
1368 nla_for_each_attr(attr, data, len, rem) {
1369 iface_param = nla_data(attr);
1370
1371 if (iface_param->param_type != ISCSI_NET_PARAM)
1372 continue;
1373
1374 switch (iface_param->iface_type) {
1375 case ISCSI_IFACE_TYPE_IPV4:
1376 switch (iface_param->iface_num) {
1377 case 0:
1378 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1379 break;
1380 default:
1381 /* Cannot have more than one IPv4 interface */
1382 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1383 "number = %d\n",
1384 iface_param->iface_num);
1385 break;
1386 }
1387 break;
1388 case ISCSI_IFACE_TYPE_IPV6:
1389 switch (iface_param->iface_num) {
1390 case 0:
1391 case 1:
1392 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1393 break;
1394 default:
1395 /* Cannot have more than two IPv6 interfaces */
1396 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1397 "number = %d\n",
1398 iface_param->iface_num);
1399 break;
1400 }
1401 break;
1402 default:
1403 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1404 break;
1405 }
1406 }
1407
1408 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1409
1410 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1411 sizeof(struct addr_ctrl_blk),
1412 FLASH_OPT_RMW_COMMIT);
1413 if (rval != QLA_SUCCESS) {
1414 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1415 __func__);
1416 rval = -EIO;
1417 goto exit_init_fw_cb;
1418 }
1419
1420 rval = qla4xxx_disable_acb(ha);
1421 if (rval != QLA_SUCCESS) {
1422 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1423 __func__);
1424 rval = -EIO;
1425 goto exit_init_fw_cb;
1426 }
1427
1428 wait_for_completion_timeout(&ha->disable_acb_comp,
1429 DISABLE_ACB_TOV * HZ);
1430
1431 qla4xxx_initcb_to_acb(init_fw_cb);
1432
1433 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1434 if (rval != QLA_SUCCESS) {
1435 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1436 __func__);
1437 rval = -EIO;
1438 goto exit_init_fw_cb;
1439 }
1440
1441 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1442 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1443 init_fw_cb_dma);
1444
1445 exit_init_fw_cb:
1446 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1447 init_fw_cb, init_fw_cb_dma);
1448
1449 return rval;
1450 }
1451
1452 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1453 enum iscsi_param param, char *buf)
1454 {
1455 struct iscsi_session *sess = cls_sess->dd_data;
1456 struct ddb_entry *ddb_entry = sess->dd_data;
1457 struct scsi_qla_host *ha = ddb_entry->ha;
1458 int rval, len;
1459 uint16_t idx;
1460
1461 switch (param) {
1462 case ISCSI_PARAM_CHAP_IN_IDX:
1463 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1464 sess->password_in, BIDI_CHAP,
1465 &idx);
1466 if (rval)
1467 len = sprintf(buf, "\n");
1468 else
1469 len = sprintf(buf, "%hu\n", idx);
1470 break;
1471 case ISCSI_PARAM_CHAP_OUT_IDX:
1472 rval = qla4xxx_get_chap_index(ha, sess->username,
1473 sess->password, LOCAL_CHAP,
1474 &idx);
1475 if (rval)
1476 len = sprintf(buf, "\n");
1477 else
1478 len = sprintf(buf, "%hu\n", idx);
1479 break;
1480 default:
1481 return iscsi_session_get_param(cls_sess, param, buf);
1482 }
1483
1484 return len;
1485 }
1486
1487 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1488 enum iscsi_param param, char *buf)
1489 {
1490 struct iscsi_conn *conn;
1491 struct qla_conn *qla_conn;
1492 struct sockaddr *dst_addr;
1493 int len = 0;
1494
1495 conn = cls_conn->dd_data;
1496 qla_conn = conn->dd_data;
1497 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1498
1499 switch (param) {
1500 case ISCSI_PARAM_CONN_PORT:
1501 case ISCSI_PARAM_CONN_ADDRESS:
1502 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1503 dst_addr, param, buf);
1504 default:
1505 return iscsi_conn_get_param(cls_conn, param, buf);
1506 }
1507
1508 return len;
1509
1510 }
1511
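/*
 * Reserve a free DDB index: find and atomically set a free bit in
 * ddb_idx_map, then ask the firmware (qla4xxx_req_ddb_entry) to confirm
 * the index is usable, retrying on MBOX_STS_COMMAND_ERROR.
 */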
1512 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1513 {
1514 uint32_t mbx_sts = 0;
1515 uint16_t tmp_ddb_index;
1516 int ret;
1517
1518 get_ddb_index:
1519 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1520
1521 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1522 DEBUG2(ql4_printk(KERN_INFO, ha,
1523 "Free DDB index not available\n"));
1524 ret = QLA_ERROR;
1525 goto exit_get_ddb_index;
1526 }
1527
1528 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1529 goto get_ddb_index;
1530
1531 DEBUG2(ql4_printk(KERN_INFO, ha,
1532 "Found a free DDB index at %d\n", tmp_ddb_index));
1533 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1534 if (ret == QLA_ERROR) {
1535 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1536 ql4_printk(KERN_INFO, ha,
1537 "DDB index = %d not available trying next\n",
1538 tmp_ddb_index);
1539 goto get_ddb_index;
1540 }
1541 DEBUG2(ql4_printk(KERN_INFO, ha,
1542 "Free FW DDB not available\n"));
1543 }
1544
1545 *ddb_index = tmp_ddb_index;
1546
1547 exit_get_ddb_index:
1548 return ret;
1549 }
1550
1551 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1552 struct ddb_entry *ddb_entry,
1553 char *existing_ipaddr,
1554 char *user_ipaddr)
1555 {
1556 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1557 char formatted_ipaddr[DDB_IPADDR_LEN];
1558 int status = QLA_SUCCESS, ret = 0;
1559
1560 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1561 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1562 '\0', NULL);
1563 if (ret == 0) {
1564 status = QLA_ERROR;
1565 goto out_match;
1566 }
1567 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1568 } else {
1569 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1570 '\0', NULL);
1571 if (ret == 0) {
1572 status = QLA_ERROR;
1573 goto out_match;
1574 }
1575 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1576 }
1577
1578 if (strcmp(existing_ipaddr, formatted_ipaddr))
1579 status = QLA_ERROR;
1580
1581 out_match:
1582 return status;
1583 }
1584
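/*
 * Return QLA_SUCCESS if an existing flash DDB session matches the
 * target name, IP address and port of cls_conn; used by conn_start to
 * avoid logging in twice to the same target.
 */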
1585 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1586 struct iscsi_cls_conn *cls_conn)
1587 {
1588 int idx = 0, max_ddbs, rval;
1589 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1590 struct iscsi_session *sess, *existing_sess;
1591 struct iscsi_conn *conn, *existing_conn;
1592 struct ddb_entry *ddb_entry;
1593
1594 sess = cls_sess->dd_data;
1595 conn = cls_conn->dd_data;
1596
1597 if (sess->targetname == NULL ||
1598 conn->persistent_address == NULL ||
1599 conn->persistent_port == 0)
1600 return QLA_ERROR;
1601
1602 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1603 MAX_DEV_DB_ENTRIES;
1604
1605 for (idx = 0; idx < max_ddbs; idx++) {
1606 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1607 if (ddb_entry == NULL)
1608 continue;
1609
1610 if (ddb_entry->ddb_type != FLASH_DDB)
1611 continue;
1612
1613 existing_sess = ddb_entry->sess->dd_data;
1614 existing_conn = ddb_entry->conn->dd_data;
1615
1616 if (existing_sess->targetname == NULL ||
1617 existing_conn->persistent_address == NULL ||
1618 existing_conn->persistent_port == 0)
1619 continue;
1620
1621 DEBUG2(ql4_printk(KERN_INFO, ha,
1622 "IQN = %s User IQN = %s\n",
1623 existing_sess->targetname,
1624 sess->targetname));
1625
1626 DEBUG2(ql4_printk(KERN_INFO, ha,
1627 "IP = %s User IP = %s\n",
1628 existing_conn->persistent_address,
1629 conn->persistent_address));
1630
1631 DEBUG2(ql4_printk(KERN_INFO, ha,
1632 "Port = %d User Port = %d\n",
1633 existing_conn->persistent_port,
1634 conn->persistent_port));
1635
1636 if (strcmp(existing_sess->targetname, sess->targetname))
1637 continue;
1638 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1639 existing_conn->persistent_address,
1640 conn->persistent_address);
1641 if (rval == QLA_ERROR)
1642 continue;
1643 if (existing_conn->persistent_port != conn->persistent_port)
1644 continue;
1645 break;
1646 }
1647
1648 if (idx == max_ddbs)
1649 return QLA_ERROR;
1650
1651 DEBUG2(ql4_printk(KERN_INFO, ha,
1652 "Match found in fwdb sessions\n"));
1653 return QLA_SUCCESS;
1654 }
1655
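/*
 * iSCSI transport create_session entry point: reserve a firmware DDB
 * index, set up the class session with a ddb_entry behind it, and
 * register the entry in fw_ddb_index_map.
 */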
1656 static struct iscsi_cls_session *
1657 qla4xxx_session_create(struct iscsi_endpoint *ep,
1658 uint16_t cmds_max, uint16_t qdepth,
1659 uint32_t initial_cmdsn)
1660 {
1661 struct iscsi_cls_session *cls_sess;
1662 struct scsi_qla_host *ha;
1663 struct qla_endpoint *qla_ep;
1664 struct ddb_entry *ddb_entry;
1665 uint16_t ddb_index;
1666 struct iscsi_session *sess;
1667 struct sockaddr *dst_addr;
1668 int ret;
1669
1670 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1671 if (!ep) {
1672 printk(KERN_ERR "qla4xxx: missing ep.\n");
1673 return NULL;
1674 }
1675
1676 qla_ep = ep->dd_data;
1677 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1678 ha = to_qla_host(qla_ep->host);
1679
1680 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1681 if (ret == QLA_ERROR)
1682 return NULL;
1683
1684 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1685 cmds_max, sizeof(struct ddb_entry),
1686 sizeof(struct ql4_task_data),
1687 initial_cmdsn, ddb_index);
1688 if (!cls_sess)
1689 return NULL;
1690
1691 sess = cls_sess->dd_data;
1692 ddb_entry = sess->dd_data;
1693 ddb_entry->fw_ddb_index = ddb_index;
1694 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1695 ddb_entry->ha = ha;
1696 ddb_entry->sess = cls_sess;
1697 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1698 ddb_entry->ddb_change = qla4xxx_ddb_change;
1699 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1700 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1701 ha->tot_ddbs++;
1702
1703 return cls_sess;
1704 }
1705
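/*
 * destroy_session entry point: wait up to LOGOUT_TOV seconds for the
 * firmware DDB to leave the active state, then clear the DDB, free the
 * ddb_entry and tear down the class session.
 */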
1706 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1707 {
1708 struct iscsi_session *sess;
1709 struct ddb_entry *ddb_entry;
1710 struct scsi_qla_host *ha;
1711 unsigned long flags, wtime;
1712 struct dev_db_entry *fw_ddb_entry = NULL;
1713 dma_addr_t fw_ddb_entry_dma;
1714 uint32_t ddb_state;
1715 int ret;
1716
1717 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1718 sess = cls_sess->dd_data;
1719 ddb_entry = sess->dd_data;
1720 ha = ddb_entry->ha;
1721
1722 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1723 &fw_ddb_entry_dma, GFP_KERNEL);
1724 if (!fw_ddb_entry) {
1725 ql4_printk(KERN_ERR, ha,
1726 "%s: Unable to allocate dma buffer\n", __func__);
1727 goto destroy_session;
1728 }
1729
1730 wtime = jiffies + (HZ * LOGOUT_TOV);
1731 do {
1732 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1733 fw_ddb_entry, fw_ddb_entry_dma,
1734 NULL, NULL, &ddb_state, NULL,
1735 NULL, NULL);
1736 if (ret == QLA_ERROR)
1737 goto destroy_session;
1738
1739 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1740 (ddb_state == DDB_DS_SESSION_FAILED))
1741 goto destroy_session;
1742
1743 schedule_timeout_uninterruptible(HZ);
1744 } while ((time_after(wtime, jiffies)));
1745
1746 destroy_session:
1747 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1748
1749 spin_lock_irqsave(&ha->hardware_lock, flags);
1750 qla4xxx_free_ddb(ha, ddb_entry);
1751 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1752
1753 iscsi_session_teardown(cls_sess);
1754
1755 if (fw_ddb_entry)
1756 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1757 fw_ddb_entry, fw_ddb_entry_dma);
1758 }
1759
1760 static struct iscsi_cls_conn *
1761 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1762 {
1763 struct iscsi_cls_conn *cls_conn;
1764 struct iscsi_session *sess;
1765 struct ddb_entry *ddb_entry;
1766
1767 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1768 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1769 conn_idx);
1770 if (!cls_conn)
1771 return NULL;
1772
1773 sess = cls_sess->dd_data;
1774 ddb_entry = sess->dd_data;
1775 ddb_entry->conn = cls_conn;
1776
1777 return cls_conn;
1778 }
1779
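/**
 * qla4xxx_conn_bind - bind a connection to its session and endpoint
 * @cls_session: Pointer to the parent session
 * @cls_conn: Pointer to the connection being bound
 * @transport_fd: Endpoint handle passed down by the iSCSI transport
 * @is_leading: Non-zero if this is the leading connection
 *
 * Binds the connection in libiscsi and attaches the endpoint looked up
 * from @transport_fd to the driver's per-connection data.
 **/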
1780 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1781 struct iscsi_cls_conn *cls_conn,
1782 uint64_t transport_fd, int is_leading)
1783 {
1784 struct iscsi_conn *conn;
1785 struct qla_conn *qla_conn;
1786 struct iscsi_endpoint *ep;
1787
1788 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1789
1790 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1791 return -EINVAL;
1792 	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
1793 	conn = cls_conn->dd_data;
1794 qla_conn = conn->dd_data;
1795 qla_conn->qla_ep = ep->dd_data;
1796 return 0;
1797 }
1798
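/**
 * qla4xxx_conn_start - log in a bound connection
 * @cls_conn: Pointer to the connection to start
 *
 * Rejects the request if a matching session already exists in the
 * firmware DDB table; otherwise programs the DDB entry with the
 * negotiated parameters and asks the firmware to open the connection.
 **/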
1799 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1800 {
1801 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1802 struct iscsi_session *sess;
1803 struct ddb_entry *ddb_entry;
1804 struct scsi_qla_host *ha;
1805 struct dev_db_entry *fw_ddb_entry = NULL;
1806 dma_addr_t fw_ddb_entry_dma;
1807 uint32_t mbx_sts = 0;
1808 int ret = 0;
1809 int status = QLA_SUCCESS;
1810
1811 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1812 sess = cls_sess->dd_data;
1813 ddb_entry = sess->dd_data;
1814 ha = ddb_entry->ha;
1815
1816 	/* Check if we have a matching FW DDB; if so, do not log in to
1817 	 * this target again, as that could cause the target to log out
1818 	 * the previous connection.
1819 	 */
1820 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1821 if (ret == QLA_SUCCESS) {
1822 ql4_printk(KERN_INFO, ha,
1823 "Session already exist in FW.\n");
1824 ret = -EEXIST;
1825 goto exit_conn_start;
1826 }
1827
1828 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1829 &fw_ddb_entry_dma, GFP_KERNEL);
1830 if (!fw_ddb_entry) {
1831 ql4_printk(KERN_ERR, ha,
1832 "%s: Unable to allocate dma buffer\n", __func__);
1833 ret = -ENOMEM;
1834 goto exit_conn_start;
1835 }
1836
1837 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1838 if (ret) {
1839 		/* If iscsid is stopped and restarted, there is no need to
1840 		 * set the params again, since the DDB state will already be
1841 		 * active and the firmware does not allow a set-DDB on an
1842 		 * active session.
1843 		 */
1844 if (mbx_sts)
1845 if (ddb_entry->fw_ddb_device_state ==
1846 DDB_DS_SESSION_ACTIVE) {
1847 ddb_entry->unblock_sess(ddb_entry->sess);
1848 goto exit_set_param;
1849 }
1850
1851 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1852 __func__, ddb_entry->fw_ddb_index);
1853 goto exit_conn_start;
1854 }
1855
1856 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1857 if (status == QLA_ERROR) {
1858 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1859 sess->targetname);
1860 ret = -EINVAL;
1861 goto exit_conn_start;
1862 }
1863
1864 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1865 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1866
1867 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1868 ddb_entry->fw_ddb_device_state));
1869
1870 exit_set_param:
1871 ret = 0;
1872
1873 exit_conn_start:
1874 if (fw_ddb_entry)
1875 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1876 fw_ddb_entry, fw_ddb_entry_dma);
1877 return ret;
1878 }
1879
1880 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1881 {
1882 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1883 struct iscsi_session *sess;
1884 struct scsi_qla_host *ha;
1885 struct ddb_entry *ddb_entry;
1886 int options;
1887
1888 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1889 sess = cls_sess->dd_data;
1890 ddb_entry = sess->dd_data;
1891 ha = ddb_entry->ha;
1892
1893 options = LOGOUT_OPTION_CLOSE_SESSION;
1894 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1895 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1896 }
1897
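/**
 * qla4xxx_task_work - deferred completion handler for passthrough PDUs
 * @wdata: Pointer to the work_struct embedded in ql4_task_data
 *
 * On PASSTHRU_STATUS_COMPLETE, restores the ITT in the response header
 * and hands the PDU back to libiscsi via iscsi_complete_pdu(); all
 * other completion statuses are logged as errors.
 **/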
1898 static void qla4xxx_task_work(struct work_struct *wdata)
1899 {
1900 struct ql4_task_data *task_data;
1901 struct scsi_qla_host *ha;
1902 struct passthru_status *sts;
1903 struct iscsi_task *task;
1904 struct iscsi_hdr *hdr;
1905 uint8_t *data;
1906 uint32_t data_len;
1907 struct iscsi_conn *conn;
1908 int hdr_len;
1909 itt_t itt;
1910
1911 task_data = container_of(wdata, struct ql4_task_data, task_work);
1912 ha = task_data->ha;
1913 task = task_data->task;
1914 sts = &task_data->sts;
1915 hdr_len = sizeof(struct iscsi_hdr);
1916
1917 DEBUG3(printk(KERN_INFO "Status returned\n"));
1918 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1919 DEBUG3(printk(KERN_INFO "Response buffer"));
1920 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1921
1922 conn = task->conn;
1923
1924 switch (sts->completionStatus) {
1925 case PASSTHRU_STATUS_COMPLETE:
1926 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1927 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1928 itt = sts->handle;
1929 hdr->itt = itt;
1930 data = task_data->resp_buffer + hdr_len;
1931 data_len = task_data->resp_len - hdr_len;
1932 iscsi_complete_pdu(conn, hdr, data, data_len);
1933 break;
1934 default:
1935 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1936 sts->completionStatus);
1937 break;
1938 }
1939 return;
1940 }
1941
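/**
 * qla4xxx_alloc_pdu - allocate request/response buffers for a PDU task
 * @task: Pointer to the iSCSI task
 * @opcode: iSCSI opcode (not used by this driver)
 *
 * Only non-SCSI (passthrough) tasks are supported.  Maps the task data
 * for DMA and allocates coherent request and response buffers sized
 * for the PDU header plus payload.
 **/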
1942 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1943 {
1944 struct ql4_task_data *task_data;
1945 struct iscsi_session *sess;
1946 struct ddb_entry *ddb_entry;
1947 struct scsi_qla_host *ha;
1948 int hdr_len;
1949
1950 sess = task->conn->session;
1951 ddb_entry = sess->dd_data;
1952 ha = ddb_entry->ha;
1953 task_data = task->dd_data;
1954 memset(task_data, 0, sizeof(struct ql4_task_data));
1955
1956 if (task->sc) {
1957 ql4_printk(KERN_INFO, ha,
1958 "%s: SCSI Commands not implemented\n", __func__);
1959 return -EINVAL;
1960 }
1961
1962 hdr_len = sizeof(struct iscsi_hdr);
1963 task_data->ha = ha;
1964 task_data->task = task;
1965
1966 if (task->data_count) {
1967 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1968 task->data_count,
1969 PCI_DMA_TODEVICE);
1970 }
1971
1972 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1973 __func__, task->conn->max_recv_dlength, hdr_len));
1974
1975 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1976 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1977 task_data->resp_len,
1978 &task_data->resp_dma,
1979 GFP_ATOMIC);
1980 if (!task_data->resp_buffer)
1981 goto exit_alloc_pdu;
1982
1983 task_data->req_len = task->data_count + hdr_len;
1984 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1985 task_data->req_len,
1986 &task_data->req_dma,
1987 GFP_ATOMIC);
1988 if (!task_data->req_buffer)
1989 goto exit_alloc_pdu;
1990
1991 task->hdr = task_data->req_buffer;
1992
1993 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1994
1995 return 0;
1996
1997 exit_alloc_pdu:
1998 if (task_data->resp_buffer)
1999 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2000 task_data->resp_buffer, task_data->resp_dma);
2001
2002 if (task_data->req_buffer)
2003 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2004 task_data->req_buffer, task_data->req_dma);
2005 return -ENOMEM;
2006 }
2007
2008 static void qla4xxx_task_cleanup(struct iscsi_task *task)
2009 {
2010 struct ql4_task_data *task_data;
2011 struct iscsi_session *sess;
2012 struct ddb_entry *ddb_entry;
2013 struct scsi_qla_host *ha;
2014 int hdr_len;
2015
2016 hdr_len = sizeof(struct iscsi_hdr);
2017 sess = task->conn->session;
2018 ddb_entry = sess->dd_data;
2019 ha = ddb_entry->ha;
2020 task_data = task->dd_data;
2021
2022 if (task->data_count) {
2023 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
2024 task->data_count, PCI_DMA_TODEVICE);
2025 }
2026
2027 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
2028 __func__, task->conn->max_recv_dlength, hdr_len));
2029
2030 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2031 task_data->resp_buffer, task_data->resp_dma);
2032 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2033 task_data->req_buffer, task_data->req_dma);
2034 return;
2035 }
2036
2037 static int qla4xxx_task_xmit(struct iscsi_task *task)
2038 {
2039 struct scsi_cmnd *sc = task->sc;
2040 struct iscsi_session *sess = task->conn->session;
2041 struct ddb_entry *ddb_entry = sess->dd_data;
2042 struct scsi_qla_host *ha = ddb_entry->ha;
2043
2044 if (!sc)
2045 return qla4xxx_send_passthru0(task);
2046
2047 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
2048 __func__);
2049 return -ENOSYS;
2050 }
2051
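/*
 * qla4xxx_copy_from_fwddb_param - translate a firmware dev_db_entry
 * into the iscsi_bus_flash_session/iscsi_bus_flash_conn representation
 * used for flash targets: option bits, TCP/IP settings, iSCSI
 * negotiation limits, addresses, target name/alias and the ISID.
 */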
2052 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
2053 struct iscsi_bus_flash_conn *conn,
2054 struct dev_db_entry *fw_ddb_entry)
2055 {
2056 unsigned long options = 0;
2057 int rc = 0;
2058
2059 options = le16_to_cpu(fw_ddb_entry->options);
2060 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2061 if (test_bit(OPT_IPV6_DEVICE, &options)) {
2062 rc = iscsi_switch_str_param(&sess->portal_type,
2063 PORTAL_TYPE_IPV6);
2064 if (rc)
2065 goto exit_copy;
2066 } else {
2067 rc = iscsi_switch_str_param(&sess->portal_type,
2068 PORTAL_TYPE_IPV4);
2069 if (rc)
2070 goto exit_copy;
2071 }
2072
2073 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2074 &options);
2075 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2076 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
2077
2078 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2079 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2080 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2081 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2082 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2083 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2084 &options);
2085 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2086 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2087 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
2088 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2089 &options);
2090 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2091 sess->discovery_auth_optional =
2092 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2093 if (test_bit(ISCSIOPT_ERL1, &options))
2094 sess->erl |= BIT_1;
2095 if (test_bit(ISCSIOPT_ERL0, &options))
2096 sess->erl |= BIT_0;
2097
2098 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2099 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2100 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2101 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2102 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2103 conn->tcp_timer_scale |= BIT_3;
2104 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2105 conn->tcp_timer_scale |= BIT_2;
2106 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2107 conn->tcp_timer_scale |= BIT_1;
2108
2109 conn->tcp_timer_scale >>= 1;
2110 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2111
2112 options = le16_to_cpu(fw_ddb_entry->ip_options);
2113 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2114
2115 conn->max_recv_dlength = BYTE_UNITS *
2116 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2117 conn->max_xmit_dlength = BYTE_UNITS *
2118 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2119 sess->first_burst = BYTE_UNITS *
2120 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2121 sess->max_burst = BYTE_UNITS *
2122 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2123 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2124 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2125 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2126 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2127 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2128 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2129 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2130 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
2131 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
2132 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2133 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2134 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2135 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
2136 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
2137 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2138 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2139
2140 sess->default_taskmgmt_timeout =
2141 le16_to_cpu(fw_ddb_entry->def_timeout);
2142 conn->port = le16_to_cpu(fw_ddb_entry->port);
2143
2144 options = le16_to_cpu(fw_ddb_entry->options);
2145 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2146 if (!conn->ipaddress) {
2147 rc = -ENOMEM;
2148 goto exit_copy;
2149 }
2150
2151 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2152 if (!conn->redirect_ipaddr) {
2153 rc = -ENOMEM;
2154 goto exit_copy;
2155 }
2156
2157 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
2158 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
2159
2160 if (test_bit(OPT_IPV6_DEVICE, &options)) {
2161 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
2162
2163 conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2164 if (!conn->link_local_ipv6_addr) {
2165 rc = -ENOMEM;
2166 goto exit_copy;
2167 }
2168
2169 memcpy(conn->link_local_ipv6_addr,
2170 fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
2171 } else {
2172 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2173 }
2174
2175 if (fw_ddb_entry->iscsi_name[0]) {
2176 rc = iscsi_switch_str_param(&sess->targetname,
2177 (char *)fw_ddb_entry->iscsi_name);
2178 if (rc)
2179 goto exit_copy;
2180 }
2181
2182 if (fw_ddb_entry->iscsi_alias[0]) {
2183 rc = iscsi_switch_str_param(&sess->targetalias,
2184 (char *)fw_ddb_entry->iscsi_alias);
2185 if (rc)
2186 goto exit_copy;
2187 }
2188
2189 COPY_ISID(sess->isid, fw_ddb_entry->isid);
2190
2191 exit_copy:
2192 return rc;
2193 }
2194
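/*
 * qla4xxx_copy_to_fwddb_param - inverse of the routine above: pack the
 * flash session/connection attributes back into a firmware
 * dev_db_entry so it can be written to the flash DDB table.
 */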
2195 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2196 struct iscsi_bus_flash_conn *conn,
2197 struct dev_db_entry *fw_ddb_entry)
2198 {
2199 uint16_t options;
2200 int rc = 0;
2201
2202 options = le16_to_cpu(fw_ddb_entry->options);
2203 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
2204 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2205 options |= BIT_8;
2206 else
2207 options &= ~BIT_8;
2208
2209 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
2210 SET_BITVAL(sess->discovery_sess, options, BIT_4);
2211 SET_BITVAL(sess->entry_state, options, BIT_3);
2212 fw_ddb_entry->options = cpu_to_le16(options);
2213
2214 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2215 SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
2216 SET_BITVAL(conn->datadgst_en, options, BIT_12);
2217 SET_BITVAL(sess->imm_data_en, options, BIT_11);
2218 SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
2219 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
2220 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
2221 SET_BITVAL(sess->chap_auth_en, options, BIT_7);
2222 SET_BITVAL(conn->snack_req_en, options, BIT_6);
2223 SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
2224 SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
2225 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
2226 SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
2227 SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
2228 fw_ddb_entry->iscsi_options = cpu_to_le16(options);
2229
2230 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2231 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
2232 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
2233 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
2234 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
2235 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
2236 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
2237 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
2238 fw_ddb_entry->tcp_options = cpu_to_le16(options);
2239
2240 options = le16_to_cpu(fw_ddb_entry->ip_options);
2241 SET_BITVAL(conn->fragment_disable, options, BIT_4);
2242 fw_ddb_entry->ip_options = cpu_to_le16(options);
2243
2244 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
2245 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
2246 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
2247 fw_ddb_entry->iscsi_max_snd_data_seg_len =
2248 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
2249 fw_ddb_entry->iscsi_first_burst_len =
2250 cpu_to_le16(sess->first_burst / BYTE_UNITS);
2251 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
2252 BYTE_UNITS);
2253 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
2254 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2255 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2256 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2257 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2258 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2259 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2260 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2261 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2262 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2263 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2264 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
2265 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2266 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2267 fw_ddb_entry->port = cpu_to_le16(conn->port);
2268 fw_ddb_entry->def_timeout =
2269 cpu_to_le16(sess->default_taskmgmt_timeout);
2270
2271 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2272 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
2273 else
2274 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2275
2276 if (conn->ipaddress)
2277 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2278 sizeof(fw_ddb_entry->ip_addr));
2279
2280 if (conn->redirect_ipaddr)
2281 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
2282 sizeof(fw_ddb_entry->tgt_addr));
2283
2284 if (conn->link_local_ipv6_addr)
2285 memcpy(fw_ddb_entry->link_local_ipv6_addr,
2286 conn->link_local_ipv6_addr,
2287 sizeof(fw_ddb_entry->link_local_ipv6_addr));
2288
2289 if (sess->targetname)
2290 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
2291 sizeof(fw_ddb_entry->iscsi_name));
2292
2293 if (sess->targetalias)
2294 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
2295 sizeof(fw_ddb_entry->iscsi_alias));
2296
2297 COPY_ISID(fw_ddb_entry->isid, sess->isid);
2298
2299 return rc;
2300 }
2301
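/*
 * qla4xxx_copy_to_sess_conn_params - populate the libiscsi session and
 * connection parameters from a firmware DDB entry, including the
 * discovery-parent type derived from the ddb_link field.
 */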
2302 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
2303 struct iscsi_session *sess,
2304 struct dev_db_entry *fw_ddb_entry)
2305 {
2306 unsigned long options = 0;
2307 uint16_t ddb_link;
2308 uint16_t disc_parent;
2309
2310 options = le16_to_cpu(fw_ddb_entry->options);
2311 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2312 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2313 &options);
2314 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2315
2316 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2317 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2318 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2319 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2320 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2321 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2322 &options);
2323 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2324 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2325 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2326 &options);
2327 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2328 sess->discovery_auth_optional =
2329 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2330 if (test_bit(ISCSIOPT_ERL1, &options))
2331 sess->erl |= BIT_1;
2332 if (test_bit(ISCSIOPT_ERL0, &options))
2333 sess->erl |= BIT_0;
2334
2335 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2336 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2337 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2338 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2339 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2340 conn->tcp_timer_scale |= BIT_3;
2341 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2342 conn->tcp_timer_scale |= BIT_2;
2343 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2344 conn->tcp_timer_scale |= BIT_1;
2345
2346 conn->tcp_timer_scale >>= 1;
2347 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2348
2349 options = le16_to_cpu(fw_ddb_entry->ip_options);
2350 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2351
2352 conn->max_recv_dlength = BYTE_UNITS *
2353 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2354 conn->max_xmit_dlength = BYTE_UNITS *
2355 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2356 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2357 sess->first_burst = BYTE_UNITS *
2358 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2359 sess->max_burst = BYTE_UNITS *
2360 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2361 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2362 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2363 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2364 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2365 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2366 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2367 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2368 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
2369 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2370 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2371 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2372 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2373 COPY_ISID(sess->isid, fw_ddb_entry->isid);
2374
2375 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
2376 if (ddb_link == DDB_ISNS)
2377 disc_parent = ISCSI_DISC_PARENT_ISNS;
2378 else if (ddb_link == DDB_NO_LINK)
2379 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2380 else if (ddb_link < MAX_DDB_ENTRIES)
2381 disc_parent = ISCSI_DISC_PARENT_SENDTGT;
2382 else
2383 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2384
2385 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
2386 iscsi_get_discovery_parent_name(disc_parent), 0);
2387
2388 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2389 (char *)fw_ddb_entry->iscsi_alias, 0);
2390 }
2391
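/*
 * qla4xxx_copy_fwddb_param - update an iscsi_cls_session/conn pair from
 * a firmware DDB entry: portal type and address, target and initiator
 * names and, when a valid CHAP table index is present, the local CHAP
 * username and secret.
 */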
2392 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2393 struct dev_db_entry *fw_ddb_entry,
2394 struct iscsi_cls_session *cls_sess,
2395 struct iscsi_cls_conn *cls_conn)
2396 {
2397 int buflen = 0;
2398 struct iscsi_session *sess;
2399 struct ddb_entry *ddb_entry;
2400 struct ql4_chap_table chap_tbl;
2401 struct iscsi_conn *conn;
2402 char ip_addr[DDB_IPADDR_LEN];
2403 uint16_t options = 0;
2404
2405 sess = cls_sess->dd_data;
2406 ddb_entry = sess->dd_data;
2407 conn = cls_conn->dd_data;
2408 memset(&chap_tbl, 0, sizeof(chap_tbl));
2409
2410 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2411
2412 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2413
2414 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
2415 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
2416
2417 memset(ip_addr, 0, sizeof(ip_addr));
2418 options = le16_to_cpu(fw_ddb_entry->options);
2419 if (options & DDB_OPT_IPV6_DEVICE) {
2420 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
2421
2422 memset(ip_addr, 0, sizeof(ip_addr));
2423 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
2424 } else {
2425 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
2426 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
2427 }
2428
2429 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2430 (char *)ip_addr, buflen);
2431 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
2432 (char *)fw_ddb_entry->iscsi_name, buflen);
2433 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
2434 (char *)ha->name_string, buflen);
2435
2436 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
2437 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
2438 chap_tbl.secret,
2439 ddb_entry->chap_tbl_idx)) {
2440 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
2441 (char *)chap_tbl.name,
2442 strlen((char *)chap_tbl.name));
2443 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
2444 (char *)chap_tbl.secret,
2445 chap_tbl.secret_len);
2446 }
2447 }
2448 }
2449
2450 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
2451 struct ddb_entry *ddb_entry)
2452 {
2453 struct iscsi_cls_session *cls_sess;
2454 struct iscsi_cls_conn *cls_conn;
2455 uint32_t ddb_state;
2456 dma_addr_t fw_ddb_entry_dma;
2457 struct dev_db_entry *fw_ddb_entry;
2458
2459 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2460 &fw_ddb_entry_dma, GFP_KERNEL);
2461 if (!fw_ddb_entry) {
2462 ql4_printk(KERN_ERR, ha,
2463 "%s: Unable to allocate dma buffer\n", __func__);
2464 goto exit_session_conn_fwddb_param;
2465 }
2466
2467 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2468 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2469 NULL, NULL, NULL) == QLA_ERROR) {
2470 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2471 "get_ddb_entry for fw_ddb_index %d\n",
2472 ha->host_no, __func__,
2473 ddb_entry->fw_ddb_index));
2474 goto exit_session_conn_fwddb_param;
2475 }
2476
2477 cls_sess = ddb_entry->sess;
2478
2479 cls_conn = ddb_entry->conn;
2480
2481 /* Update params */
2482 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2483
2484 exit_session_conn_fwddb_param:
2485 if (fw_ddb_entry)
2486 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2487 fw_ddb_entry, fw_ddb_entry_dma);
2488 }
2489
2490 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2491 struct ddb_entry *ddb_entry)
2492 {
2493 struct iscsi_cls_session *cls_sess;
2494 struct iscsi_cls_conn *cls_conn;
2495 struct iscsi_session *sess;
2496 struct iscsi_conn *conn;
2497 uint32_t ddb_state;
2498 dma_addr_t fw_ddb_entry_dma;
2499 struct dev_db_entry *fw_ddb_entry;
2500
2501 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2502 &fw_ddb_entry_dma, GFP_KERNEL);
2503 if (!fw_ddb_entry) {
2504 ql4_printk(KERN_ERR, ha,
2505 "%s: Unable to allocate dma buffer\n", __func__);
2506 goto exit_session_conn_param;
2507 }
2508
2509 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2510 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2511 NULL, NULL, NULL) == QLA_ERROR) {
2512 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2513 "get_ddb_entry for fw_ddb_index %d\n",
2514 ha->host_no, __func__,
2515 ddb_entry->fw_ddb_index));
2516 goto exit_session_conn_param;
2517 }
2518
2519 cls_sess = ddb_entry->sess;
2520 sess = cls_sess->dd_data;
2521
2522 cls_conn = ddb_entry->conn;
2523 conn = cls_conn->dd_data;
2524
2525 /* Update timers after login */
2526 ddb_entry->default_relogin_timeout =
2527 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2528 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2529 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2530 ddb_entry->default_time2wait =
2531 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2532
2533 /* Update params */
2534 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2535 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2536
2537 memcpy(sess->initiatorname, ha->name_string,
2538 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2539
2540 exit_session_conn_param:
2541 if (fw_ddb_entry)
2542 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2543 fw_ddb_entry, fw_ddb_entry_dma);
2544 }
2545
2546 /*
2547 * Timer routines
2548 */
2549
2550 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2551 unsigned long interval)
2552 {
2553 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2554 __func__, ha->host->host_no));
2555 init_timer(&ha->timer);
2556 ha->timer.expires = jiffies + interval * HZ;
2557 ha->timer.data = (unsigned long)ha;
2558 ha->timer.function = (void (*)(unsigned long))func;
2559 add_timer(&ha->timer);
2560 ha->timer_active = 1;
2561 }
2562
2563 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2564 {
2565 del_timer_sync(&ha->timer);
2566 ha->timer_active = 0;
2567 }
2568
2569 /**
2570  * qla4xxx_mark_device_missing - blocks the session
2571  * @cls_session: Pointer to the session to be blocked
2572  *
2573  * This routine marks the device missing by blocking the iSCSI
2574  * session via iscsi_block_session().
2575  **/
2576 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2577 {
2578 iscsi_block_session(cls_session);
2579 }
2580
2581 /**
2582 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2583 * @ha: Pointer to host adapter structure.
2584 *
2585  * This routine marks all devices as missing by blocking each session
2586  * on the host.
2586 **/
2587 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2588 {
2589 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2590 }
2591
2592 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2593 struct ddb_entry *ddb_entry,
2594 struct scsi_cmnd *cmd)
2595 {
2596 struct srb *srb;
2597
2598 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2599 if (!srb)
2600 return srb;
2601
2602 kref_init(&srb->srb_ref);
2603 srb->ha = ha;
2604 srb->ddb = ddb_entry;
2605 srb->cmd = cmd;
2606 srb->flags = 0;
2607 CMD_SP(cmd) = (void *)srb;
2608
2609 return srb;
2610 }
2611
2612 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2613 {
2614 struct scsi_cmnd *cmd = srb->cmd;
2615
2616 if (srb->flags & SRB_DMA_VALID) {
2617 scsi_dma_unmap(cmd);
2618 srb->flags &= ~SRB_DMA_VALID;
2619 }
2620 CMD_SP(cmd) = NULL;
2621 }
2622
2623 void qla4xxx_srb_compl(struct kref *ref)
2624 {
2625 struct srb *srb = container_of(ref, struct srb, srb_ref);
2626 struct scsi_cmnd *cmd = srb->cmd;
2627 struct scsi_qla_host *ha = srb->ha;
2628
2629 qla4xxx_srb_free_dma(ha, srb);
2630
2631 mempool_free(srb, ha->srb_mempool);
2632
2633 cmd->scsi_done(cmd);
2634 }
2635
2636 /**
2637 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2638 * @host: scsi host
2639 * @cmd: Pointer to Linux's SCSI command structure
2640 *
2641 * Remarks:
2642 * This routine is invoked by Linux to send a SCSI command to the driver.
2643 * The mid-level driver tries to ensure that queuecommand never gets
2644 * invoked concurrently with itself or the interrupt handler (although
2645 * the interrupt handler may call this routine as part of request-
2646  * completion handling). Unfortunately, it sometimes calls the scheduler
2647  * in interrupt context, which is a big NO! NO!.
2648 **/
2649 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2650 {
2651 struct scsi_qla_host *ha = to_qla_host(host);
2652 struct ddb_entry *ddb_entry = cmd->device->hostdata;
2653 struct iscsi_cls_session *sess = ddb_entry->sess;
2654 struct srb *srb;
2655 int rval;
2656
2657 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2658 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2659 cmd->result = DID_NO_CONNECT << 16;
2660 else
2661 cmd->result = DID_REQUEUE << 16;
2662 goto qc_fail_command;
2663 }
2664
2665 if (!sess) {
2666 cmd->result = DID_IMM_RETRY << 16;
2667 goto qc_fail_command;
2668 }
2669
2670 rval = iscsi_session_chkready(sess);
2671 if (rval) {
2672 cmd->result = rval;
2673 goto qc_fail_command;
2674 }
2675
2676 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2677 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2678 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2679 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2680 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2681 !test_bit(AF_ONLINE, &ha->flags) ||
2682 !test_bit(AF_LINK_UP, &ha->flags) ||
2683 test_bit(AF_LOOPBACK, &ha->flags) ||
2684 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
2685 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
2686 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2687 goto qc_host_busy;
2688
2689 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2690 if (!srb)
2691 goto qc_host_busy;
2692
2693 rval = qla4xxx_send_command_to_isp(ha, srb);
2694 if (rval != QLA_SUCCESS)
2695 goto qc_host_busy_free_sp;
2696
2697 return 0;
2698
2699 qc_host_busy_free_sp:
2700 qla4xxx_srb_free_dma(ha, srb);
2701 mempool_free(srb, ha->srb_mempool);
2702
2703 qc_host_busy:
2704 return SCSI_MLQUEUE_HOST_BUSY;
2705
2706 qc_fail_command:
2707 cmd->scsi_done(cmd);
2708
2709 return 0;
2710 }
2711
2712 /**
2713 * qla4xxx_mem_free - frees memory allocated to adapter
2714 * @ha: Pointer to host adapter structure.
2715 *
2716 * Frees memory previously allocated by qla4xxx_mem_alloc
2717 **/
2718 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2719 {
2720 if (ha->queues)
2721 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2722 ha->queues_dma);
2723
2724 if (ha->fw_dump)
2725 vfree(ha->fw_dump);
2726
2727 ha->queues_len = 0;
2728 ha->queues = NULL;
2729 ha->queues_dma = 0;
2730 ha->request_ring = NULL;
2731 ha->request_dma = 0;
2732 ha->response_ring = NULL;
2733 ha->response_dma = 0;
2734 ha->shadow_regs = NULL;
2735 ha->shadow_regs_dma = 0;
2736 ha->fw_dump = NULL;
2737 ha->fw_dump_size = 0;
2738
2739 /* Free srb pool. */
2740 if (ha->srb_mempool)
2741 mempool_destroy(ha->srb_mempool);
2742
2743 ha->srb_mempool = NULL;
2744
2745 if (ha->chap_dma_pool)
2746 dma_pool_destroy(ha->chap_dma_pool);
2747
2748 if (ha->chap_list)
2749 vfree(ha->chap_list);
2750 ha->chap_list = NULL;
2751
2752 if (ha->fw_ddb_dma_pool)
2753 dma_pool_destroy(ha->fw_ddb_dma_pool);
2754
2755 /* release io space registers */
2756 if (is_qla8022(ha)) {
2757 if (ha->nx_pcibase)
2758 iounmap(
2759 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2760 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2761 if (ha->nx_pcibase)
2762 iounmap(
2763 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2764 } else if (ha->reg) {
2765 iounmap(ha->reg);
2766 }
2767
2768 if (ha->reset_tmplt.buff)
2769 vfree(ha->reset_tmplt.buff);
2770
2771 pci_release_regions(ha->pdev);
2772 }
2773
2774 /**
2775 * qla4xxx_mem_alloc - allocates memory for use by adapter.
2776 * @ha: Pointer to host adapter structure
2777 *
2778 * Allocates DMA memory for request and response queues. Also allocates memory
2779 * for srbs.
2780 **/
2781 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2782 {
2783 unsigned long align;
2784
2785 /* Allocate contiguous block of DMA memory for queues. */
2786 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2787 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2788 sizeof(struct shadow_regs) +
2789 MEM_ALIGN_VALUE +
2790 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2791 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2792 &ha->queues_dma, GFP_KERNEL);
2793 if (ha->queues == NULL) {
2794 ql4_printk(KERN_WARNING, ha,
2795 "Memory Allocation failed - queues.\n");
2796
2797 goto mem_alloc_error_exit;
2798 }
2799 memset(ha->queues, 0, ha->queues_len);
2800
2801 /*
2802 * As per RISC alignment requirements -- the bus-address must be a
2803 * multiple of the request-ring size (in bytes).
2804 */
2805 align = 0;
2806 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2807 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2808 (MEM_ALIGN_VALUE - 1));
2809
2810 /* Update request and response queue pointers. */
2811 ha->request_dma = ha->queues_dma + align;
2812 ha->request_ring = (struct queue_entry *) (ha->queues + align);
2813 ha->response_dma = ha->queues_dma + align +
2814 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2815 ha->response_ring = (struct queue_entry *) (ha->queues + align +
2816 (REQUEST_QUEUE_DEPTH *
2817 QUEUE_SIZE));
2818 ha->shadow_regs_dma = ha->queues_dma + align +
2819 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2820 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2821 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2822 (REQUEST_QUEUE_DEPTH *
2823 QUEUE_SIZE) +
2824 (RESPONSE_QUEUE_DEPTH *
2825 QUEUE_SIZE));
2826
2827 /* Allocate memory for srb pool. */
2828 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2829 mempool_free_slab, srb_cachep);
2830 if (ha->srb_mempool == NULL) {
2831 ql4_printk(KERN_WARNING, ha,
2832 "Memory Allocation failed - SRB Pool.\n");
2833
2834 goto mem_alloc_error_exit;
2835 }
2836
2837 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2838 CHAP_DMA_BLOCK_SIZE, 8, 0);
2839
2840 if (ha->chap_dma_pool == NULL) {
2841 ql4_printk(KERN_WARNING, ha,
2842 "%s: chap_dma_pool allocation failed..\n", __func__);
2843 goto mem_alloc_error_exit;
2844 }
2845
2846 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2847 DDB_DMA_BLOCK_SIZE, 8, 0);
2848
2849 if (ha->fw_ddb_dma_pool == NULL) {
2850 ql4_printk(KERN_WARNING, ha,
2851 "%s: fw_ddb_dma_pool allocation failed..\n",
2852 __func__);
2853 goto mem_alloc_error_exit;
2854 }
2855
2856 return QLA_SUCCESS;
2857
2858 mem_alloc_error_exit:
2859 qla4xxx_mem_free(ha);
2860 return QLA_ERROR;
2861 }
2862
2863 /**
2864 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2865 * @ha: adapter block pointer.
2866 *
2867 * Note: The caller should not hold the idc lock.
2868 **/
2869 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2870 {
2871 uint32_t temp, temp_state, temp_val;
2872 int status = QLA_SUCCESS;
2873
2874 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2875
2876 temp_state = qla82xx_get_temp_state(temp);
2877 temp_val = qla82xx_get_temp_val(temp);
2878
2879 if (temp_state == QLA82XX_TEMP_PANIC) {
2880 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2881 " exceeds maximum allowed. Hardware has been shut"
2882 " down.\n", temp_val);
2883 status = QLA_ERROR;
2884 } else if (temp_state == QLA82XX_TEMP_WARN) {
2885 if (ha->temperature == QLA82XX_TEMP_NORMAL)
2886 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2887 " degrees C exceeds operating range."
2888 " Immediate action needed.\n", temp_val);
2889 } else {
2890 if (ha->temperature == QLA82XX_TEMP_WARN)
2891 ql4_printk(KERN_INFO, ha, "Device temperature is"
2892 " now %d degrees C in normal range.\n",
2893 temp_val);
2894 }
2895 ha->temperature = temp_state;
2896 return status;
2897 }
2898
2899 /**
2900 * qla4_8xxx_check_fw_alive - Check firmware health
2901 * @ha: Pointer to host adapter structure.
2902 *
2903 * Context: Interrupt
2904 **/
2905 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2906 {
2907 uint32_t fw_heartbeat_counter;
2908 int status = QLA_SUCCESS;
2909
2910 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2911 QLA8XXX_PEG_ALIVE_COUNTER);
2912 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2913 if (fw_heartbeat_counter == 0xffffffff) {
2914 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2915 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2916 ha->host_no, __func__));
2917 return status;
2918 }
2919
2920 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2921 ha->seconds_since_last_heartbeat++;
2922 /* FW not alive after 2 seconds */
2923 if (ha->seconds_since_last_heartbeat == 2) {
2924 ha->seconds_since_last_heartbeat = 0;
2925 qla4_8xxx_dump_peg_reg(ha);
2926 status = QLA_ERROR;
2927 }
2928 } else
2929 ha->seconds_since_last_heartbeat = 0;
2930
2931 ha->fw_heartbeat_counter = fw_heartbeat_counter;
2932 return status;
2933 }
2934
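/*
 * qla4_8xxx_process_fw_error - decode the PEG halt status and schedule
 * either an unrecoverable-HA or a reset-HA DPC; any mailbox command in
 * flight is completed prematurely since recovery must be deferred to
 * the DPC thread.
 */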
2935 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2936 {
2937 uint32_t halt_status;
2938 int halt_status_unrecoverable = 0;
2939
2940 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
2941
2942 if (is_qla8022(ha)) {
2943 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2944 __func__);
2945 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2946 CRB_NIU_XG_PAUSE_CTL_P0 |
2947 CRB_NIU_XG_PAUSE_CTL_P1);
2948
2949 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2950 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
2951 __func__);
2952 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2953 halt_status_unrecoverable = 1;
2954 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2955 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2956 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
2957 __func__);
2958 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
2959 halt_status_unrecoverable = 1;
2960 }
2961
2962 /*
2963 * Since we cannot change dev_state in interrupt context,
2964 	 * set the appropriate DPC flag, then wake up the DPC
2965 */
2966 if (halt_status_unrecoverable) {
2967 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2968 } else {
2969 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
2970 __func__);
2971 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2972 }
2973 qla4xxx_mailbox_premature_completion(ha);
2974 qla4xxx_wake_dpc(ha);
2975 }
2976
2977 /**
2978 * qla4_8xxx_watchdog - Poll dev state
2979 * @ha: Pointer to host adapter structure.
2980 *
2981 * Context: Interrupt
2982 **/
2983 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2984 {
2985 uint32_t dev_state;
2986 uint32_t idc_ctrl;
2987
2988 /* don't poll if reset is going on */
2989 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2990 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2991 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2992 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2993
2994 if (qla4_8xxx_check_temp(ha)) {
2995 if (is_qla8022(ha)) {
2996 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
2997 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2998 CRB_NIU_XG_PAUSE_CTL_P0 |
2999 CRB_NIU_XG_PAUSE_CTL_P1);
3000 }
3001 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
3002 qla4xxx_wake_dpc(ha);
3003 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
3004 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3005
3006 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
3007 __func__);
3008
3009 if (is_qla8032(ha) || is_qla8042(ha)) {
3010 idc_ctrl = qla4_83xx_rd_reg(ha,
3011 QLA83XX_IDC_DRV_CTRL);
3012 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
3013 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
3014 __func__);
3015 qla4xxx_mailbox_premature_completion(
3016 ha);
3017 }
3018 }
3019
3020 if ((is_qla8032(ha) || is_qla8042(ha)) ||
3021 (is_qla8022(ha) && !ql4xdontresethba)) {
3022 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3023 qla4xxx_wake_dpc(ha);
3024 }
3025 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3026 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3027 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
3028 __func__);
3029 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
3030 qla4xxx_wake_dpc(ha);
3031 } else {
3032 /* Check firmware health */
3033 if (qla4_8xxx_check_fw_alive(ha))
3034 qla4_8xxx_process_fw_error(ha);
3035 }
3036 }
3037 }
3038
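/*
 * qla4xxx_check_relogin_flash_ddb - per-second relogin housekeeping for
 * flash DDB sessions: counts down the retry-relogin timer and, once it
 * expires (or a pending relogin attempt times out while the session is
 * still offline), schedules DPC_RELOGIN_DEVICE to retry the login.
 */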
3039 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3040 {
3041 struct iscsi_session *sess;
3042 struct ddb_entry *ddb_entry;
3043 struct scsi_qla_host *ha;
3044
3045 sess = cls_sess->dd_data;
3046 ddb_entry = sess->dd_data;
3047 ha = ddb_entry->ha;
3048
3049 if (!(ddb_entry->ddb_type == FLASH_DDB))
3050 return;
3051
3052 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
3053 !iscsi_is_session_online(cls_sess)) {
3054 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
3055 INVALID_ENTRY) {
3056 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
3057 0) {
3058 atomic_set(&ddb_entry->retry_relogin_timer,
3059 INVALID_ENTRY);
3060 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3061 set_bit(DF_RELOGIN, &ddb_entry->flags);
3062 DEBUG2(ql4_printk(KERN_INFO, ha,
3063 "%s: index [%d] login device\n",
3064 __func__, ddb_entry->fw_ddb_index));
3065 } else
3066 atomic_dec(&ddb_entry->retry_relogin_timer);
3067 }
3068 }
3069
3070 	/* Wait for relogin to time out */
3071 if (atomic_read(&ddb_entry->relogin_timer) &&
3072 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
3073 /*
3074 * If the relogin times out and the device is
3075 * still NOT ONLINE then try and relogin again.
3076 */
3077 if (!iscsi_is_session_online(cls_sess)) {
3078 /* Reset retry relogin timer */
3079 atomic_inc(&ddb_entry->relogin_retry_count);
3080 DEBUG2(ql4_printk(KERN_INFO, ha,
3081 "%s: index[%d] relogin timed out-retrying"
3082 " relogin (%d), retry (%d)\n", __func__,
3083 ddb_entry->fw_ddb_index,
3084 atomic_read(&ddb_entry->relogin_retry_count),
3085 ddb_entry->default_time2wait + 4));
3086 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3087 atomic_set(&ddb_entry->retry_relogin_timer,
3088 ddb_entry->default_time2wait + 4);
3089 }
3090 }
3091 }
3092
3093 /**
3094 * qla4xxx_timer - checks every second for work to do.
3095 * @ha: Pointer to host adapter structure.
3096 **/
3097 static void qla4xxx_timer(struct scsi_qla_host *ha)
3098 {
3099 int start_dpc = 0;
3100 uint16_t w;
3101
3102 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
3103
3104 /* If we are in the middle of AER/EEH processing
3105 * skip any processing and reschedule the timer
3106 */
3107 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3108 mod_timer(&ha->timer, jiffies + HZ);
3109 return;
3110 }
3111
3112 /* Hardware read to trigger an EEH error during mailbox waits. */
3113 if (!pci_channel_offline(ha->pdev))
3114 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3115
3116 if (is_qla80XX(ha))
3117 qla4_8xxx_watchdog(ha);
3118
3119 if (is_qla40XX(ha)) {
3120 /* Check for heartbeat interval. */
3121 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
3122 ha->heartbeat_interval != 0) {
3123 ha->seconds_since_last_heartbeat++;
3124 if (ha->seconds_since_last_heartbeat >
3125 ha->heartbeat_interval + 2)
3126 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3127 }
3128 }
3129
3130 /* Process any deferred work. */
3131 if (!list_empty(&ha->work_list))
3132 start_dpc++;
3133
3134 /* Wakeup the dpc routine for this adapter, if needed. */
3135 if (start_dpc ||
3136 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3137 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
3138 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
3139 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3140 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3141 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
3142 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
3143 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
3144 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
3145 test_bit(DPC_AEN, &ha->dpc_flags)) {
3146 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
3147 " - dpc flags = 0x%lx\n",
3148 ha->host_no, __func__, ha->dpc_flags));
3149 qla4xxx_wake_dpc(ha);
3150 }
3151
3152 /* Reschedule timer thread to call us back in one second */
3153 mod_timer(&ha->timer, jiffies + HZ);
3154
3155 DEBUG2(ha->seconds_since_last_intr++);
3156 }
3157
3158 /**
3159 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
3160 * @ha: Pointer to host adapter structure.
3161 *
3162 * This routine stalls the driver until all outstanding commands are returned.
3163 * Caller must release the Hardware Lock prior to calling this routine.
3164 **/
3165 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
3166 {
3167 uint32_t index = 0;
3168 unsigned long flags;
3169 struct scsi_cmnd *cmd;
3170
3171 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
3172
3173 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
3174 "complete\n", WAIT_CMD_TOV));
3175
3176 while (!time_after_eq(jiffies, wtime)) {
3177 spin_lock_irqsave(&ha->hardware_lock, flags);
3178 /* Find a command that hasn't completed. */
3179 for (index = 0; index < ha->host->can_queue; index++) {
3180 cmd = scsi_host_find_tag(ha->host, index);
3181 /*
3182 * We cannot just check if the index is valid,
3183 			 * because if we are being run from the SCSI EH, then
3184 * the scsi/block layer is going to prevent
3185 * the tag from being released.
3186 */
3187 if (cmd != NULL && CMD_SP(cmd))
3188 break;
3189 }
3190 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3191
3192 /* If No Commands are pending, wait is complete */
3193 if (index == ha->host->can_queue)
3194 return QLA_SUCCESS;
3195
3196 msleep(1000);
3197 }
3198 	/* If we timed out waiting for the commands to complete,
3199 	 * return an error. */
3200 return QLA_ERROR;
3201 }
3202
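/**
 * qla4xxx_hw_reset - issues a soft reset to the ISP
 * @ha: Pointer to host adapter structure.
 *
 * Acquires the driver lock, clears a pending SCSI reset interrupt if
 * necessary and writes the soft-reset bit to the control/status
 * register.
 **/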
3203 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
3204 {
3205 uint32_t ctrl_status;
3206 unsigned long flags = 0;
3207
3208 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
3209
3210 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
3211 return QLA_ERROR;
3212
3213 spin_lock_irqsave(&ha->hardware_lock, flags);
3214
3215 /*
3216 * If the SCSI Reset Interrupt bit is set, clear it.
3217 * Otherwise, the Soft Reset won't work.
3218 */
3219 ctrl_status = readw(&ha->reg->ctrl_status);
3220 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
3221 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3222
3223 /* Issue Soft Reset */
3224 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
3225 readl(&ha->reg->ctrl_status);
3226
3227 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3228 return QLA_SUCCESS;
3229 }
3230
3231 /**
3232 * qla4xxx_soft_reset - performs soft reset.
3233 * @ha: Pointer to host adapter structure.
3234 **/
3235 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
3236 {
3237 uint32_t max_wait_time;
3238 unsigned long flags = 0;
3239 int status;
3240 uint32_t ctrl_status;
3241
3242 status = qla4xxx_hw_reset(ha);
3243 if (status != QLA_SUCCESS)
3244 return status;
3245
3246 status = QLA_ERROR;
3247 /* Wait until the Network Reset Intr bit is cleared */
3248 max_wait_time = RESET_INTR_TOV;
3249 do {
3250 spin_lock_irqsave(&ha->hardware_lock, flags);
3251 ctrl_status = readw(&ha->reg->ctrl_status);
3252 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3253
3254 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
3255 break;
3256
3257 msleep(1000);
3258 } while ((--max_wait_time));
3259
3260 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
3261 DEBUG2(printk(KERN_WARNING
3262 "scsi%ld: Network Reset Intr not cleared by "
3263 "Network function, clearing it now!\n",
3264 ha->host_no));
3265 spin_lock_irqsave(&ha->hardware_lock, flags);
3266 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
3267 readl(&ha->reg->ctrl_status);
3268 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3269 }
3270
3271 /* Wait until the firmware tells us the Soft Reset is done */
3272 max_wait_time = SOFT_RESET_TOV;
3273 do {
3274 spin_lock_irqsave(&ha->hardware_lock, flags);
3275 ctrl_status = readw(&ha->reg->ctrl_status);
3276 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3277
3278 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
3279 status = QLA_SUCCESS;
3280 break;
3281 }
3282
3283 msleep(1000);
3284 } while ((--max_wait_time));
3285
3286 /*
3287 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
3288 * after the soft reset has taken place.
3289 */
3290 spin_lock_irqsave(&ha->hardware_lock, flags);
3291 ctrl_status = readw(&ha->reg->ctrl_status);
3292 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
3293 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3294 readl(&ha->reg->ctrl_status);
3295 }
3296 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3297
3298 	/* If the soft reset fails, the BIOS on the other function is
3299 	 * most likely also enabled.
3300 	 * Since initialization is sequential, the other function
3301 	 * won't be able to acknowledge the soft reset.
3302 	 * Issue a force soft reset to work around this scenario.
3303 	 */
3304 if (max_wait_time == 0) {
3305 /* Issue Force Soft Reset */
3306 spin_lock_irqsave(&ha->hardware_lock, flags);
3307 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
3308 readl(&ha->reg->ctrl_status);
3309 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3310 /* Wait until the firmware tells us the Soft Reset is done */
3311 max_wait_time = SOFT_RESET_TOV;
3312 do {
3313 spin_lock_irqsave(&ha->hardware_lock, flags);
3314 ctrl_status = readw(&ha->reg->ctrl_status);
3315 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3316
3317 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
3318 status = QLA_SUCCESS;
3319 break;
3320 }
3321
3322 msleep(1000);
3323 } while ((--max_wait_time));
3324 }
3325
3326 return status;
3327 }
3328
3329 /**
3330 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
3331 * @ha: Pointer to host adapter structure.
3332 * @res: returned scsi status
3333 *
3334 * This routine is called just prior to a HARD RESET to return all
3335 * outstanding commands back to the Operating System.
3336 * Caller should make sure that the following locks are released
3337  * before calling this routine: the hardware lock and io_request_lock.
3338 **/
3339 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
3340 {
3341 struct srb *srb;
3342 int i;
3343 unsigned long flags;
3344
3345 spin_lock_irqsave(&ha->hardware_lock, flags);
3346 for (i = 0; i < ha->host->can_queue; i++) {
3347 srb = qla4xxx_del_from_active_array(ha, i);
3348 if (srb != NULL) {
3349 srb->cmd->result = res;
3350 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3351 }
3352 }
3353 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3354 }
3355
3356 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
3357 {
3358 clear_bit(AF_ONLINE, &ha->flags);
3359
3360 /* Disable the board */
3361 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
3362
3363 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3364 qla4xxx_mark_all_devices_missing(ha);
3365 clear_bit(AF_INIT_DONE, &ha->flags);
3366 }
3367
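/*
 * qla4xxx_fail_session - mark a session failed prior to adapter
 * recovery: flash DDB sessions are blocked, all other sessions are
 * failed through iscsi_session_failure() so that libiscsi starts its
 * own recovery.
 */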
3368 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
3369 {
3370 struct iscsi_session *sess;
3371 struct ddb_entry *ddb_entry;
3372
3373 sess = cls_session->dd_data;
3374 ddb_entry = sess->dd_data;
3375 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
3376
3377 if (ddb_entry->ddb_type == FLASH_DDB)
3378 iscsi_block_session(ddb_entry->sess);
3379 else
3380 iscsi_session_failure(cls_session->dd_data,
3381 ISCSI_ERR_CONN_FAILED);
3382 }
3383
3384 /**
3385 * qla4xxx_recover_adapter - recovers adapter after a fatal error
3386 * @ha: Pointer to host adapter structure.
3387 **/
3388 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3389 {
3390 int status = QLA_ERROR;
3391 uint8_t reset_chip = 0;
3392 uint32_t dev_state;
3393 unsigned long wait;
3394
3395 /* Stall incoming I/O until we are done */
3396 scsi_block_requests(ha->host);
3397 clear_bit(AF_ONLINE, &ha->flags);
3398 clear_bit(AF_LINK_UP, &ha->flags);
3399
3400 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
3401
3402 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3403
3404 if ((is_qla8032(ha) || is_qla8042(ha)) &&
3405 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3406 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3407 __func__);
3408 /* disable pause frame for ISP83xx */
3409 qla4_83xx_disable_pause(ha);
3410 }
3411
3412 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
3413
3414 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3415 reset_chip = 1;
3416
3417 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
3418 * do not reset adapter, jump to initialize_adapter */
3419 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3420 status = QLA_SUCCESS;
3421 goto recover_ha_init_adapter;
3422 }
3423
3424 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
3425 * from eh_host_reset or ioctl module */
3426 if (is_qla80XX(ha) && !reset_chip &&
3427 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3428
3429 DEBUG2(ql4_printk(KERN_INFO, ha,
3430 "scsi%ld: %s - Performing stop_firmware...\n",
3431 ha->host_no, __func__));
3432 status = ha->isp_ops->reset_firmware(ha);
3433 if (status == QLA_SUCCESS) {
3434 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3435 qla4xxx_cmd_wait(ha);
3436
3437 ha->isp_ops->disable_intrs(ha);
3438 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3439 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3440 } else {
3441 /* If the stop_firmware fails then
3442 * reset the entire chip */
3443 reset_chip = 1;
3444 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3445 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3446 }
3447 }
3448
3449 /* Issue full chip reset if recovering from a catastrophic error,
3450 * or if stop_firmware fails for ISP-8xxx.
3451 * This is the default case for ISP-4xxx */
3452 if (is_qla40XX(ha) || reset_chip) {
3453 if (is_qla40XX(ha))
3454 goto chip_reset;
3455
3456 /* Check if 8XXX firmware is alive or not
3457 * We may have arrived here from NEED_RESET
3458 * detection only */
3459 if (test_bit(AF_FW_RECOVERY, &ha->flags))
3460 goto chip_reset;
3461
3462 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
3463 while (time_before(jiffies, wait)) {
3464 if (qla4_8xxx_check_fw_alive(ha)) {
3465 qla4xxx_mailbox_premature_completion(ha);
3466 break;
3467 }
3468
3469 set_current_state(TASK_UNINTERRUPTIBLE);
3470 schedule_timeout(HZ);
3471 }
3472 chip_reset:
3473 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3474 qla4xxx_cmd_wait(ha);
3475
3476 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3477 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3478 DEBUG2(ql4_printk(KERN_INFO, ha,
3479 "scsi%ld: %s - Performing chip reset..\n",
3480 ha->host_no, __func__));
3481 status = ha->isp_ops->reset_chip(ha);
3482 }
3483
3484 /* Flush any pending ddb changed AENs */
3485 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3486
3487 recover_ha_init_adapter:
3488 /* Upon successful firmware/chip reset, re-initialize the adapter */
3489 if (status == QLA_SUCCESS) {
3490 /* For ISP-4xxx, force function 1 to always initialize
3491 * before function 3 to prevent both functions from
3492 * stepping on top of the other */
3493 if (is_qla40XX(ha) && (ha->mac_index == 3))
3494 ssleep(6);
3495
3496 /* NOTE: AF_ONLINE flag set upon successful completion of
3497 * qla4xxx_initialize_adapter */
3498 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3499 }
3500
3501 /* Retry failed adapter initialization, if necessary
3502 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3503 * case to prevent ping-pong resets between functions */
3504 if (!test_bit(AF_ONLINE, &ha->flags) &&
3505 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3506 /* Adapter initialization failed, see if we can retry
3507 * resetting the ha.
3508 * Since we don't want to block the DPC for too long
3509 * with multiple resets in the same thread,
3510 * utilize DPC to retry */
3511 if (is_qla80XX(ha)) {
3512 ha->isp_ops->idc_lock(ha);
3513 dev_state = qla4_8xxx_rd_direct(ha,
3514 QLA8XXX_CRB_DEV_STATE);
3515 ha->isp_ops->idc_unlock(ha);
3516 if (dev_state == QLA8XXX_DEV_FAILED) {
3517 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3518 "recover adapter. H/W is in Failed "
3519 "state\n", __func__);
3520 qla4xxx_dead_adapter_cleanup(ha);
3521 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3522 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3523 clear_bit(DPC_RESET_HA_FW_CONTEXT,
3524 &ha->dpc_flags);
3525 status = QLA_ERROR;
3526
3527 goto exit_recover;
3528 }
3529 }
3530
3531 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3532 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3533 DEBUG2(printk("scsi%ld: recover adapter - retrying "
3534 "(%d) more times\n", ha->host_no,
3535 ha->retry_reset_ha_cnt));
3536 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3537 status = QLA_ERROR;
3538 } else {
3539 if (ha->retry_reset_ha_cnt > 0) {
3540 /* Schedule another Reset HA--DPC will retry */
3541 ha->retry_reset_ha_cnt--;
3542 DEBUG2(printk("scsi%ld: recover adapter - "
3543 "retry remaining %d\n",
3544 ha->host_no,
3545 ha->retry_reset_ha_cnt));
3546 status = QLA_ERROR;
3547 }
3548
3549 if (ha->retry_reset_ha_cnt == 0) {
3550 /* Recover adapter retries have been exhausted.
3551 * Adapter DEAD */
3552 DEBUG2(printk("scsi%ld: recover adapter "
3553 "failed - board disabled\n",
3554 ha->host_no));
3555 qla4xxx_dead_adapter_cleanup(ha);
3556 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3557 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3558 clear_bit(DPC_RESET_HA_FW_CONTEXT,
3559 &ha->dpc_flags);
3560 status = QLA_ERROR;
3561 }
3562 }
3563 } else {
3564 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3565 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3566 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3567 }
3568
3569 exit_recover:
3570 ha->adapter_error_count++;
3571
3572 if (test_bit(AF_ONLINE, &ha->flags))
3573 ha->isp_ops->enable_intrs(ha);
3574
3575 scsi_unblock_requests(ha->host);
3576
3577 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3578 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3579 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3580
3581 return status;
3582 }
3583
3584 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3585 {
3586 struct iscsi_session *sess;
3587 struct ddb_entry *ddb_entry;
3588 struct scsi_qla_host *ha;
3589
3590 sess = cls_session->dd_data;
3591 ddb_entry = sess->dd_data;
3592 ha = ddb_entry->ha;
3593 if (!iscsi_is_session_online(cls_session)) {
3594 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3595 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3596 " unblock session\n", ha->host_no, __func__,
3597 ddb_entry->fw_ddb_index);
3598 iscsi_unblock_session(ddb_entry->sess);
3599 } else {
3600 /* Trigger relogin */
3601 if (ddb_entry->ddb_type == FLASH_DDB) {
3602 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
3603 test_bit(DF_DISABLE_RELOGIN,
3604 &ddb_entry->flags)))
3605 qla4xxx_arm_relogin_timer(ddb_entry);
3606 } else
3607 iscsi_session_failure(cls_session->dd_data,
3608 ISCSI_ERR_CONN_FAILED);
3609 }
3610 }
3611 }
3612
3613 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3614 {
3615 struct iscsi_session *sess;
3616 struct ddb_entry *ddb_entry;
3617 struct scsi_qla_host *ha;
3618
3619 sess = cls_session->dd_data;
3620 ddb_entry = sess->dd_data;
3621 ha = ddb_entry->ha;
3622 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3623 " unblock session\n", ha->host_no, __func__,
3624 ddb_entry->fw_ddb_index);
3625
3626 iscsi_unblock_session(ddb_entry->sess);
3627
3628 /* Start scan target */
3629 if (test_bit(AF_ONLINE, &ha->flags)) {
3630 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3631 " start scan\n", ha->host_no, __func__,
3632 ddb_entry->fw_ddb_index);
3633 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3634 }
3635 return QLA_SUCCESS;
3636 }
3637
3638 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3639 {
3640 struct iscsi_session *sess;
3641 struct ddb_entry *ddb_entry;
3642 struct scsi_qla_host *ha;
3643 int status = QLA_SUCCESS;
3644
3645 sess = cls_session->dd_data;
3646 ddb_entry = sess->dd_data;
3647 ha = ddb_entry->ha;
3648 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3649 " unblock user space session\n", ha->host_no, __func__,
3650 ddb_entry->fw_ddb_index);
3651
3652 if (!iscsi_is_session_online(cls_session)) {
3653 iscsi_conn_start(ddb_entry->conn);
3654 iscsi_conn_login_event(ddb_entry->conn,
3655 ISCSI_CONN_STATE_LOGGED_IN);
3656 } else {
3657 ql4_printk(KERN_INFO, ha,
3658 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3659 ha->host_no, __func__, ddb_entry->fw_ddb_index,
3660 cls_session->sid);
3661 status = QLA_ERROR;
3662 }
3663
3664 return status;
3665 }
3666
3667 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3668 {
3669 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3670 }
3671
3672 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3673 {
3674 uint16_t relogin_timer;
3675 struct iscsi_session *sess;
3676 struct ddb_entry *ddb_entry;
3677 struct scsi_qla_host *ha;
3678
3679 sess = cls_sess->dd_data;
3680 ddb_entry = sess->dd_data;
3681 ha = ddb_entry->ha;
3682
3683 relogin_timer = max(ddb_entry->default_relogin_timeout,
3684 (uint16_t)RELOGIN_TOV);
3685 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3686
3687 DEBUG2(ql4_printk(KERN_INFO, ha,
3688 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3689 ddb_entry->fw_ddb_index, relogin_timer));
3690
3691 qla4xxx_login_flash_ddb(cls_sess);
3692 }
3693
3694 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3695 {
3696 struct iscsi_session *sess;
3697 struct ddb_entry *ddb_entry;
3698 struct scsi_qla_host *ha;
3699
3700 sess = cls_sess->dd_data;
3701 ddb_entry = sess->dd_data;
3702 ha = ddb_entry->ha;
3703
3704 if (!(ddb_entry->ddb_type == FLASH_DDB))
3705 return;
3706
3707 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
3708 return;
3709
3710 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3711 !iscsi_is_session_online(cls_sess)) {
3712 DEBUG2(ql4_printk(KERN_INFO, ha,
3713 "relogin issued\n"));
3714 qla4xxx_relogin_flash_ddb(cls_sess);
3715 }
3716 }
3717
3718 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3719 {
3720 if (ha->dpc_thread)
3721 queue_work(ha->dpc_thread, &ha->dpc_work);
3722 }
3723
3724 static struct qla4_work_evt *
3725 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3726 enum qla4_work_type type)
3727 {
3728 struct qla4_work_evt *e;
3729 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3730
3731 e = kzalloc(size, GFP_ATOMIC);
3732 if (!e)
3733 return NULL;
3734
3735 INIT_LIST_HEAD(&e->list);
3736 e->type = type;
3737 return e;
3738 }
3739
3740 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3741 struct qla4_work_evt *e)
3742 {
3743 unsigned long flags;
3744
3745 spin_lock_irqsave(&ha->work_lock, flags);
3746 list_add_tail(&e->list, &ha->work_list);
3747 spin_unlock_irqrestore(&ha->work_lock, flags);
3748 qla4xxx_wake_dpc(ha);
3749 }
3750
3751 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3752 enum iscsi_host_event_code aen_code,
3753 uint32_t data_size, uint8_t *data)
3754 {
3755 struct qla4_work_evt *e;
3756
3757 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3758 if (!e)
3759 return QLA_ERROR;
3760
3761 e->u.aen.code = aen_code;
3762 e->u.aen.data_size = data_size;
3763 memcpy(e->u.aen.data, data, data_size);
3764
3765 qla4xxx_post_work(ha, e);
3766
3767 return QLA_SUCCESS;
3768 }
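/*
 * Illustrative usage (a sketch; the real call sites live in this driver's
 * ISR/mailbox paths, and mbox_sts here is a hypothetical local): events are
 * allocated with GFP_ATOMIC so posting is safe from contexts that cannot
 * sleep, and they are delivered to user space later by qla4xxx_do_work()
 * running in the DPC.
 *
 *	qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP, sizeof(mbox_sts),
 *			      (uint8_t *)mbox_sts);
 */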
3769
3770 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3771 uint32_t status, uint32_t pid,
3772 uint32_t data_size, uint8_t *data)
3773 {
3774 struct qla4_work_evt *e;
3775
3776 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3777 if (!e)
3778 return QLA_ERROR;
3779
3780 e->u.ping.status = status;
3781 e->u.ping.pid = pid;
3782 e->u.ping.data_size = data_size;
3783 memcpy(e->u.ping.data, data, data_size);
3784
3785 qla4xxx_post_work(ha, e);
3786
3787 return QLA_SUCCESS;
3788 }
3789
3790 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3791 {
3792 struct qla4_work_evt *e, *tmp;
3793 unsigned long flags;
3794 LIST_HEAD(work);
3795
3796 spin_lock_irqsave(&ha->work_lock, flags);
3797 list_splice_init(&ha->work_list, &work);
3798 spin_unlock_irqrestore(&ha->work_lock, flags);
3799
3800 list_for_each_entry_safe(e, tmp, &work, list) {
3801 list_del_init(&e->list);
3802
3803 switch (e->type) {
3804 case QLA4_EVENT_AEN:
3805 iscsi_post_host_event(ha->host_no,
3806 &qla4xxx_iscsi_transport,
3807 e->u.aen.code,
3808 e->u.aen.data_size,
3809 e->u.aen.data);
3810 break;
3811 case QLA4_EVENT_PING_STATUS:
3812 iscsi_ping_comp_event(ha->host_no,
3813 &qla4xxx_iscsi_transport,
3814 e->u.ping.status,
3815 e->u.ping.pid,
3816 e->u.ping.data_size,
3817 e->u.ping.data);
3818 break;
3819 default:
3820 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3821 "supported", e->type);
3822 }
3823 kfree(e);
3824 }
3825 }
3826
3827 /**
3828 * qla4xxx_do_dpc - dpc routine
3829 * @work: work struct embedded in the host adapter structure
3830 *
3831 * This routine is a task that is scheduled by the interrupt handler
3832 * to perform the background processing for interrupts. We put it
3833 * on a task queue that is consumed whenever the scheduler runs; that's
3834 * so you can do anything (e.g. put the process to sleep). In fact,
3835 * the mid-level tries to sleep when it reaches the driver threshold
3836 * "host->can_queue". This would cause a panic if we were in interrupt context.
3837 **/
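/*
 * Summary of the processing order implemented below (added for
 * readability): post queued work events, handle ISP-8xxx IDC states
 * (unrecoverable / IDC ack / ACB restore / quiescence), run adapter
 * recovery if a reset flag is set, then process AENs, DHCP address
 * changes, relogins and link-state changes.
 */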
3838 static void qla4xxx_do_dpc(struct work_struct *work)
3839 {
3840 struct scsi_qla_host *ha =
3841 container_of(work, struct scsi_qla_host, dpc_work);
3842 int status = QLA_ERROR;
3843
3844 DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
3845 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3846 ha->host_no, __func__, ha->flags, ha->dpc_flags))
3847
3848 /* Initialization not yet finished. Don't do anything yet. */
3849 if (!test_bit(AF_INIT_DONE, &ha->flags))
3850 return;
3851
3852 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3853 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3854 ha->host_no, __func__, ha->flags));
3855 return;
3856 }
3857
3858 /* post events to application */
3859 qla4xxx_do_work(ha);
3860
3861 if (is_qla80XX(ha)) {
3862 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3863 if (is_qla8032(ha) || is_qla8042(ha)) {
3864 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3865 __func__);
3866 /* disable pause frame for ISP83xx */
3867 qla4_83xx_disable_pause(ha);
3868 }
3869
3870 ha->isp_ops->idc_lock(ha);
3871 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3872 QLA8XXX_DEV_FAILED);
3873 ha->isp_ops->idc_unlock(ha);
3874 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3875 qla4_8xxx_device_state_handler(ha);
3876 }
3877
3878 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
3879 if (is_qla8042(ha)) {
3880 if (ha->idc_info.info2 &
3881 ENABLE_INTERNAL_LOOPBACK) {
3882 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
3883 __func__);
3884 status = qla4_84xx_config_acb(ha,
3885 ACB_CONFIG_DISABLE);
3886 if (status != QLA_SUCCESS) {
3887 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
3888 __func__);
3889 }
3890 }
3891 }
3892 qla4_83xx_post_idc_ack(ha);
3893 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
3894 }
3895
3896 if (is_qla8042(ha) &&
3897 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
3898 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
3899 __func__);
3900 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
3901 QLA_SUCCESS) {
3902 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
3903 __func__);
3904 }
3905 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
3906 }
3907
3908 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3909 qla4_8xxx_need_qsnt_handler(ha);
3910 }
3911 }
3912
3913 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3914 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3915 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3916 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3917 if ((is_qla8022(ha) && ql4xdontresethba) ||
3918 ((is_qla8032(ha) || is_qla8042(ha)) &&
3919 qla4_83xx_idc_dontreset(ha))) {
3920 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3921 ha->host_no, __func__));
3922 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3923 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3924 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3925 goto dpc_post_reset_ha;
3926 }
3927 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3928 test_bit(DPC_RESET_HA, &ha->dpc_flags))
3929 qla4xxx_recover_adapter(ha);
3930
3931 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3932 uint8_t wait_time = RESET_INTR_TOV;
3933
3934 while ((readw(&ha->reg->ctrl_status) &
3935 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3936 if (--wait_time == 0)
3937 break;
3938 msleep(1000);
3939 }
3940 if (wait_time == 0)
3941 DEBUG2(printk("scsi%ld: %s: SR|FSR "
3942 "bit not cleared-- resetting\n",
3943 ha->host_no, __func__));
3944 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3945 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3946 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3947 status = qla4xxx_recover_adapter(ha);
3948 }
3949 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3950 if (status == QLA_SUCCESS)
3951 ha->isp_ops->enable_intrs(ha);
3952 }
3953 }
3954
3955 dpc_post_reset_ha:
3956 /* ---- process AEN? --- */
3957 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3958 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3959
3960 /* ---- Get DHCP IP Address? --- */
3961 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3962 qla4xxx_get_dhcp_ip_address(ha);
3963
3964 /* ---- relogin device? --- */
3965 if (adapter_up(ha) &&
3966 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3967 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3968 }
3969
3970 /* ---- link change? --- */
3971 if (!test_bit(AF_LOOPBACK, &ha->flags) &&
3972 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3973 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3974 /* ---- link down? --- */
3975 qla4xxx_mark_all_devices_missing(ha);
3976 } else {
3977 /* ---- link up? --- *
3978 * F/W will auto login to all devices ONLY ONCE after
3979 * link up during driver initialization and runtime
3980 * fatal error recovery. Therefore, the driver must
3981 * manually relogin to devices when recovering from
3982 * connection failures, logouts, expired KATO, etc. */
3983 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3984 qla4xxx_build_ddb_list(ha, ha->is_reset);
3985 iscsi_host_for_each_session(ha->host,
3986 qla4xxx_login_flash_ddb);
3987 } else
3988 qla4xxx_relogin_all_devices(ha);
3989 }
3990 }
3991 }
3992
3993 /**
3994 * qla4xxx_free_adapter - release the adapter
3995 * @ha: pointer to adapter structure
3996 **/
3997 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3998 {
3999 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4000
4001 /* Turn-off interrupts on the card. */
4002 ha->isp_ops->disable_intrs(ha);
4003
4004 if (is_qla40XX(ha)) {
4005 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
4006 &ha->reg->ctrl_status);
4007 readl(&ha->reg->ctrl_status);
4008 } else if (is_qla8022(ha)) {
4009 writel(0, &ha->qla4_82xx_reg->host_int);
4010 readl(&ha->qla4_82xx_reg->host_int);
4011 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4012 writel(0, &ha->qla4_83xx_reg->risc_intr);
4013 readl(&ha->qla4_83xx_reg->risc_intr);
4014 }
4015
4016 /* Remove timer thread, if present */
4017 if (ha->timer_active)
4018 qla4xxx_stop_timer(ha);
4019
4020 /* Kill the kernel thread for this host */
4021 if (ha->dpc_thread)
4022 destroy_workqueue(ha->dpc_thread);
4023
4024 /* Kill the kernel thread for this host */
4025 if (ha->task_wq)
4026 destroy_workqueue(ha->task_wq);
4027
4028 /* Put firmware in known state */
4029 ha->isp_ops->reset_firmware(ha);
4030
4031 if (is_qla80XX(ha)) {
4032 ha->isp_ops->idc_lock(ha);
4033 qla4_8xxx_clear_drv_active(ha);
4034 ha->isp_ops->idc_unlock(ha);
4035 }
4036
4037 /* Detach interrupts */
4038 qla4xxx_free_irqs(ha);
4039
4040 /* free extra memory */
4041 qla4xxx_mem_free(ha);
4042 }
4043
4044 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
4045 {
4046 int status = 0;
4047 unsigned long mem_base, mem_len, db_base, db_len;
4048 struct pci_dev *pdev = ha->pdev;
4049
4050 status = pci_request_regions(pdev, DRIVER_NAME);
4051 if (status) {
4052 printk(KERN_WARNING
4053 "scsi(%ld) Failed to reserve PIO regions (%s) "
4054 "status=%d\n", ha->host_no, pci_name(pdev), status);
4055 goto iospace_error_exit;
4056 }
4057
4058 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
4059 __func__, pdev->revision));
4060 ha->revision_id = pdev->revision;
4061
4062 /* remap phys address */
4063 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
4064 mem_len = pci_resource_len(pdev, 0);
4065 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
4066 __func__, mem_base, mem_len));
4067
4068 /* mapping of pcibase pointer */
4069 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
4070 if (!ha->nx_pcibase) {
4071 printk(KERN_ERR
4072 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
4073 pci_release_regions(ha->pdev);
4074 goto iospace_error_exit;
4075 }
4076
4077 /* Mapping of IO base pointer, door bell read and write pointer */
4078
4079 /* mapping of IO base pointer */
4080 if (is_qla8022(ha)) {
4081 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
4082 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
4083 (ha->pdev->devfn << 11));
4084 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
4085 QLA82XX_CAM_RAM_DB2);
4086 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4087 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
4088 ((uint8_t *)ha->nx_pcibase);
4089 }
4090
4091 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
4092 db_len = pci_resource_len(pdev, 4);
4093
4094 return 0;
4095 iospace_error_exit:
4096 return -ENOMEM;
4097 }
4098
4099 /**
4100 * qla4xxx_iospace_config - maps registers
4101 * @ha: pointer to adapter structure
4102 *
4103 * This routine maps the HBA's registers from the PCI address space
4104 * into the kernel virtual address space for memory-mapped I/O.
4105 **/
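/*
 * Note (summary of the mapping below): for ISP-4xxx, BAR 0 is the optional
 * PIO region and BAR 1 is the MMIO register window; only MIN_IOBASE_LEN
 * bytes of the MMIO BAR are remapped into ha->reg.
 */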
4106 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
4107 {
4108 unsigned long pio, pio_len, pio_flags;
4109 unsigned long mmio, mmio_len, mmio_flags;
4110
4111 pio = pci_resource_start(ha->pdev, 0);
4112 pio_len = pci_resource_len(ha->pdev, 0);
4113 pio_flags = pci_resource_flags(ha->pdev, 0);
4114 if (pio_flags & IORESOURCE_IO) {
4115 if (pio_len < MIN_IOBASE_LEN) {
4116 ql4_printk(KERN_WARNING, ha,
4117 "Invalid PCI I/O region size\n");
4118 pio = 0;
4119 }
4120 } else {
4121 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
4122 pio = 0;
4123 }
4124
4125 /* Use MMIO operations for all accesses. */
4126 mmio = pci_resource_start(ha->pdev, 1);
4127 mmio_len = pci_resource_len(ha->pdev, 1);
4128 mmio_flags = pci_resource_flags(ha->pdev, 1);
4129
4130 if (!(mmio_flags & IORESOURCE_MEM)) {
4131 ql4_printk(KERN_ERR, ha,
4132 "region #0 not an MMIO resource, aborting\n");
4133
4134 goto iospace_error_exit;
4135 }
4136
4137 if (mmio_len < MIN_IOBASE_LEN) {
4138 ql4_printk(KERN_ERR, ha,
4139 "Invalid PCI mem region size, aborting\n");
4140 goto iospace_error_exit;
4141 }
4142
4143 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
4144 ql4_printk(KERN_WARNING, ha,
4145 "Failed to reserve PIO/MMIO regions\n");
4146
4147 goto iospace_error_exit;
4148 }
4149
4150 ha->pio_address = pio;
4151 ha->pio_length = pio_len;
4152 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
4153 if (!ha->reg) {
4154 ql4_printk(KERN_ERR, ha,
4155 "cannot remap MMIO, aborting\n");
4156
4157 goto iospace_error_exit;
4158 }
4159
4160 return 0;
4161
4162 iospace_error_exit:
4163 return -ENOMEM;
4164 }
4165
4166 static struct isp_operations qla4xxx_isp_ops = {
4167 .iospace_config = qla4xxx_iospace_config,
4168 .pci_config = qla4xxx_pci_config,
4169 .disable_intrs = qla4xxx_disable_intrs,
4170 .enable_intrs = qla4xxx_enable_intrs,
4171 .start_firmware = qla4xxx_start_firmware,
4172 .intr_handler = qla4xxx_intr_handler,
4173 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
4174 .reset_chip = qla4xxx_soft_reset,
4175 .reset_firmware = qla4xxx_hw_reset,
4176 .queue_iocb = qla4xxx_queue_iocb,
4177 .complete_iocb = qla4xxx_complete_iocb,
4178 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
4179 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
4180 .get_sys_info = qla4xxx_get_sys_info,
4181 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
4182 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
4183 };
4184
4185 static struct isp_operations qla4_82xx_isp_ops = {
4186 .iospace_config = qla4_8xxx_iospace_config,
4187 .pci_config = qla4_8xxx_pci_config,
4188 .disable_intrs = qla4_82xx_disable_intrs,
4189 .enable_intrs = qla4_82xx_enable_intrs,
4190 .start_firmware = qla4_8xxx_load_risc,
4191 .restart_firmware = qla4_82xx_try_start_fw,
4192 .intr_handler = qla4_82xx_intr_handler,
4193 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
4194 .need_reset = qla4_8xxx_need_reset,
4195 .reset_chip = qla4_82xx_isp_reset,
4196 .reset_firmware = qla4_8xxx_stop_firmware,
4197 .queue_iocb = qla4_82xx_queue_iocb,
4198 .complete_iocb = qla4_82xx_complete_iocb,
4199 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
4200 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
4201 .get_sys_info = qla4_8xxx_get_sys_info,
4202 .rd_reg_direct = qla4_82xx_rd_32,
4203 .wr_reg_direct = qla4_82xx_wr_32,
4204 .rd_reg_indirect = qla4_82xx_md_rd_32,
4205 .wr_reg_indirect = qla4_82xx_md_wr_32,
4206 .idc_lock = qla4_82xx_idc_lock,
4207 .idc_unlock = qla4_82xx_idc_unlock,
4208 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
4209 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
4210 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
4211 };
4212
4213 static struct isp_operations qla4_83xx_isp_ops = {
4214 .iospace_config = qla4_8xxx_iospace_config,
4215 .pci_config = qla4_8xxx_pci_config,
4216 .disable_intrs = qla4_83xx_disable_intrs,
4217 .enable_intrs = qla4_83xx_enable_intrs,
4218 .start_firmware = qla4_8xxx_load_risc,
4219 .restart_firmware = qla4_83xx_start_firmware,
4220 .intr_handler = qla4_83xx_intr_handler,
4221 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
4222 .need_reset = qla4_8xxx_need_reset,
4223 .reset_chip = qla4_83xx_isp_reset,
4224 .reset_firmware = qla4_8xxx_stop_firmware,
4225 .queue_iocb = qla4_83xx_queue_iocb,
4226 .complete_iocb = qla4_83xx_complete_iocb,
4227 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
4228 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
4229 .get_sys_info = qla4_8xxx_get_sys_info,
4230 .rd_reg_direct = qla4_83xx_rd_reg,
4231 .wr_reg_direct = qla4_83xx_wr_reg,
4232 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
4233 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
4234 .idc_lock = qla4_83xx_drv_lock,
4235 .idc_unlock = qla4_83xx_drv_unlock,
4236 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
4237 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
4238 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
4239 };
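/*
 * The three isp_operations tables above let the rest of the driver stay
 * chip-agnostic: ha->isp_ops is pointed at the table matching the probed
 * device, and common code dispatches through it, e.g. (as used in the
 * recovery path earlier in this file):
 *
 *	status = ha->isp_ops->reset_chip(ha);
 *	ha->isp_ops->enable_intrs(ha);
 */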
4240
4241 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4242 {
4243 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
4244 }
4245
4246 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4247 {
4248 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
4249 }
4250
4251 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4252 {
4253 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
4254 }
4255
4256 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4257 {
4258 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
4259 }
4260
4261 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
4262 {
4263 struct scsi_qla_host *ha = data;
4264 char *str = buf;
4265 int rc;
4266
4267 switch (type) {
4268 case ISCSI_BOOT_ETH_FLAGS:
4269 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4270 break;
4271 case ISCSI_BOOT_ETH_INDEX:
4272 rc = sprintf(str, "0\n");
4273 break;
4274 case ISCSI_BOOT_ETH_MAC:
4275 rc = sysfs_format_mac(str, ha->my_mac,
4276 MAC_ADDR_LEN);
4277 break;
4278 default:
4279 rc = -ENOSYS;
4280 break;
4281 }
4282 return rc;
4283 }
4284
4285 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
4286 {
4287 int rc;
4288
4289 switch (type) {
4290 case ISCSI_BOOT_ETH_FLAGS:
4291 case ISCSI_BOOT_ETH_MAC:
4292 case ISCSI_BOOT_ETH_INDEX:
4293 rc = S_IRUGO;
4294 break;
4295 default:
4296 rc = 0;
4297 break;
4298 }
4299 return rc;
4300 }
4301
4302 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
4303 {
4304 struct scsi_qla_host *ha = data;
4305 char *str = buf;
4306 int rc;
4307
4308 switch (type) {
4309 case ISCSI_BOOT_INI_INITIATOR_NAME:
4310 rc = sprintf(str, "%s\n", ha->name_string);
4311 break;
4312 default:
4313 rc = -ENOSYS;
4314 break;
4315 }
4316 return rc;
4317 }
4318
4319 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
4320 {
4321 int rc;
4322
4323 switch (type) {
4324 case ISCSI_BOOT_INI_INITIATOR_NAME:
4325 rc = S_IRUGO;
4326 break;
4327 default:
4328 rc = 0;
4329 break;
4330 }
4331 return rc;
4332 }
4333
4334 static ssize_t
4335 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
4336 char *buf)
4337 {
4338 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4339 char *str = buf;
4340 int rc;
4341
4342 switch (type) {
4343 case ISCSI_BOOT_TGT_NAME:
4344 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
4345 break;
4346 case ISCSI_BOOT_TGT_IP_ADDR:
4347 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
4348 rc = sprintf(buf, "%pI4\n",
4349 &boot_conn->dest_ipaddr.ip_address);
4350 else
4351 rc = sprintf(str, "%pI6\n",
4352 &boot_conn->dest_ipaddr.ip_address);
4353 break;
4354 case ISCSI_BOOT_TGT_PORT:
4355 rc = sprintf(str, "%d\n", boot_conn->dest_port);
4356 break;
4357 case ISCSI_BOOT_TGT_CHAP_NAME:
4358 rc = sprintf(str, "%.*s\n",
4359 boot_conn->chap.target_chap_name_length,
4360 (char *)&boot_conn->chap.target_chap_name);
4361 break;
4362 case ISCSI_BOOT_TGT_CHAP_SECRET:
4363 rc = sprintf(str, "%.*s\n",
4364 boot_conn->chap.target_secret_length,
4365 (char *)&boot_conn->chap.target_secret);
4366 break;
4367 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4368 rc = sprintf(str, "%.*s\n",
4369 boot_conn->chap.intr_chap_name_length,
4370 (char *)&boot_conn->chap.intr_chap_name);
4371 break;
4372 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4373 rc = sprintf(str, "%.*s\n",
4374 boot_conn->chap.intr_secret_length,
4375 (char *)&boot_conn->chap.intr_secret);
4376 break;
4377 case ISCSI_BOOT_TGT_FLAGS:
4378 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4379 break;
4380 case ISCSI_BOOT_TGT_NIC_ASSOC:
4381 rc = sprintf(str, "0\n");
4382 break;
4383 default:
4384 rc = -ENOSYS;
4385 break;
4386 }
4387 return rc;
4388 }
4389
4390 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
4391 {
4392 struct scsi_qla_host *ha = data;
4393 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
4394
4395 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4396 }
4397
4398 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
4399 {
4400 struct scsi_qla_host *ha = data;
4401 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
4402
4403 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4404 }
4405
4406 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
4407 {
4408 int rc;
4409
4410 switch (type) {
4411 case ISCSI_BOOT_TGT_NAME:
4412 case ISCSI_BOOT_TGT_IP_ADDR:
4413 case ISCSI_BOOT_TGT_PORT:
4414 case ISCSI_BOOT_TGT_CHAP_NAME:
4415 case ISCSI_BOOT_TGT_CHAP_SECRET:
4416 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4417 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4418 case ISCSI_BOOT_TGT_NIC_ASSOC:
4419 case ISCSI_BOOT_TGT_FLAGS:
4420 rc = S_IRUGO;
4421 break;
4422 default:
4423 rc = 0;
4424 break;
4425 }
4426 return rc;
4427 }
4428
4429 static void qla4xxx_boot_release(void *data)
4430 {
4431 struct scsi_qla_host *ha = data;
4432
4433 scsi_host_put(ha->host);
4434 }
4435
4436 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4437 {
4438 dma_addr_t buf_dma;
4439 uint32_t addr, pri_addr, sec_addr;
4440 uint32_t offset;
4441 uint16_t func_num;
4442 uint8_t val;
4443 uint8_t *buf = NULL;
4444 size_t size = 13 * sizeof(uint8_t);
4445 int ret = QLA_SUCCESS;
4446
4447 func_num = PCI_FUNC(ha->pdev->devfn);
4448
4449 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
4450 __func__, ha->pdev->device, func_num);
4451
4452 if (is_qla40XX(ha)) {
4453 if (func_num == 1) {
4454 addr = NVRAM_PORT0_BOOT_MODE;
4455 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
4456 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
4457 } else if (func_num == 3) {
4458 addr = NVRAM_PORT1_BOOT_MODE;
4459 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
4460 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
4461 } else {
4462 ret = QLA_ERROR;
4463 goto exit_boot_info;
4464 }
4465
4466 /* Check Boot Mode */
4467 val = rd_nvram_byte(ha, addr);
4468 if (!(val & 0x07)) {
4469 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
4470 "options : 0x%x\n", __func__, val));
4471 ret = QLA_ERROR;
4472 goto exit_boot_info;
4473 }
4474
4475 /* get primary valid target index */
4476 val = rd_nvram_byte(ha, pri_addr);
4477 if (val & BIT_7)
4478 ddb_index[0] = (val & 0x7f);
4479
4480 /* get secondary valid target index */
4481 val = rd_nvram_byte(ha, sec_addr);
4482 if (val & BIT_7)
4483 ddb_index[1] = (val & 0x7f);
4484
4485 } else if (is_qla80XX(ha)) {
4486 buf = dma_alloc_coherent(&ha->pdev->dev, size,
4487 &buf_dma, GFP_KERNEL);
4488 if (!buf) {
4489 DEBUG2(ql4_printk(KERN_ERR, ha,
4490 "%s: Unable to allocate dma buffer\n",
4491 __func__));
4492 ret = QLA_ERROR;
4493 goto exit_boot_info;
4494 }
4495
4496 if (ha->port_num == 0)
4497 offset = BOOT_PARAM_OFFSET_PORT0;
4498 else if (ha->port_num == 1)
4499 offset = BOOT_PARAM_OFFSET_PORT1;
4500 else {
4501 ret = QLA_ERROR;
4502 goto exit_boot_info_free;
4503 }
4504 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
4505 offset;
4506 if (qla4xxx_get_flash(ha, buf_dma, addr,
4507 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
4508 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
4509 " failed\n", ha->host_no, __func__));
4510 ret = QLA_ERROR;
4511 goto exit_boot_info_free;
4512 }
4513 /* Check Boot Mode */
4514 if (!(buf[1] & 0x07)) {
4515 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
4516 " : 0x%x\n", buf[1]));
4517 ret = QLA_ERROR;
4518 goto exit_boot_info_free;
4519 }
4520
4521 /* get primary valid target index */
4522 if (buf[2] & BIT_7)
4523 ddb_index[0] = buf[2] & 0x7f;
4524
4525 /* get secondary valid target index */
4526 if (buf[11] & BIT_7)
4527 ddb_index[1] = buf[11] & 0x7f;
4528 } else {
4529 ret = QLA_ERROR;
4530 goto exit_boot_info;
4531 }
4532
4533 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
4534 " target ID %d\n", __func__, ddb_index[0],
4535 ddb_index[1]));
4536
4537 exit_boot_info_free:
4538 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
4539 exit_boot_info:
4540 ha->pri_ddb_idx = ddb_index[0];
4541 ha->sec_ddb_idx = ddb_index[1];
4542 return ret;
4543 }
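/*
 * Layout notes for get_fw_boot_info() above (derived from the code, for
 * readability): ISP-4xxx reads the boot mode and primary/secondary target
 * indices from per-port NVRAM bytes, while ISP-8xxx reads a 13-byte block
 * from the FLT iSCSI parameter region. In both cases BIT_7 marks a valid
 * target index and the low 7 bits carry the DDB index.
 */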
4544
4545 /**
4546 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4547 * @ha: pointer to adapter structure
4548 * @username: CHAP username to be returned
4549 * @password: CHAP password to be returned
4550 *
4551 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
4552 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
4553 * So, from the CHAP cache, find the first BIDI CHAP entry and set it
4554 * in the boot record in sysfs.
4555 **/
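/*
 * CHAP table flag bits checked below (as used by this driver): BIT_7 set
 * marks a local (initiator) CHAP entry and BIT_6 set marks a BIDI entry;
 * only non-local BIDI entries are eligible here.
 */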
4556 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4557 char *password)
4558 {
4559 int i, ret = -EINVAL;
4560 int max_chap_entries = 0;
4561 struct ql4_chap_table *chap_table;
4562
4563 if (is_qla80XX(ha))
4564 max_chap_entries = (ha->hw.flt_chap_size / 2) /
4565 sizeof(struct ql4_chap_table);
4566 else
4567 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4568
4569 if (!ha->chap_list) {
4570 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4571 return ret;
4572 }
4573
4574 mutex_lock(&ha->chap_sem);
4575 for (i = 0; i < max_chap_entries; i++) {
4576 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4577 if (chap_table->cookie !=
4578 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4579 continue;
4580 }
4581
4582 if (chap_table->flags & BIT_7) /* local */
4583 continue;
4584
4585 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4586 continue;
4587
4588 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4589 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4590 ret = 0;
4591 break;
4592 }
4593 mutex_unlock(&ha->chap_sem);
4594
4595 return ret;
4596 }
4597
4598
4599 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4600 struct ql4_boot_session_info *boot_sess,
4601 uint16_t ddb_index)
4602 {
4603 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4604 struct dev_db_entry *fw_ddb_entry;
4605 dma_addr_t fw_ddb_entry_dma;
4606 uint16_t idx;
4607 uint16_t options;
4608 int ret = QLA_SUCCESS;
4609
4610 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4611 &fw_ddb_entry_dma, GFP_KERNEL);
4612 if (!fw_ddb_entry) {
4613 DEBUG2(ql4_printk(KERN_ERR, ha,
4614 "%s: Unable to allocate dma buffer.\n",
4615 __func__));
4616 ret = QLA_ERROR;
4617 return ret;
4618 }
4619
4620 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4621 fw_ddb_entry_dma, ddb_index)) {
4622 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4623 "index [%d]\n", __func__, ddb_index));
4624 ret = QLA_ERROR;
4625 goto exit_boot_target;
4626 }
4627
4628 /* Update target name and IP from DDB */
4629 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4630 min(sizeof(boot_sess->target_name),
4631 sizeof(fw_ddb_entry->iscsi_name)));
4632
4633 options = le16_to_cpu(fw_ddb_entry->options);
4634 if (options & DDB_OPT_IPV6_DEVICE) {
4635 memcpy(&boot_conn->dest_ipaddr.ip_address,
4636 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4637 } else {
4638 boot_conn->dest_ipaddr.ip_type = 0x1;
4639 memcpy(&boot_conn->dest_ipaddr.ip_address,
4640 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4641 }
4642
4643 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4644
4645 /* update chap information */
4646 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4647
4648 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4649
4650 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4651
4652 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4653 target_chap_name,
4654 (char *)&boot_conn->chap.target_secret,
4655 idx);
4656 if (ret) {
4657 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4658 ret = QLA_ERROR;
4659 goto exit_boot_target;
4660 }
4661
4662 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4663 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4664 }
4665
4666 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4667
4668 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4669
4670 ret = qla4xxx_get_bidi_chap(ha,
4671 (char *)&boot_conn->chap.intr_chap_name,
4672 (char *)&boot_conn->chap.intr_secret);
4673
4674 if (ret) {
4675 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4676 ret = QLA_ERROR;
4677 goto exit_boot_target;
4678 }
4679
4680 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4681 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4682 }
4683
4684 exit_boot_target:
4685 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4686 fw_ddb_entry, fw_ddb_entry_dma);
4687 return ret;
4688 }
4689
4690 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4691 {
4692 uint16_t ddb_index[2];
4693 int ret = QLA_ERROR;
4694 int rval;
4695
4696 memset(ddb_index, 0, sizeof(ddb_index));
4697 ddb_index[0] = 0xffff;
4698 ddb_index[1] = 0xffff;
4699 ret = get_fw_boot_info(ha, ddb_index);
4700 if (ret != QLA_SUCCESS) {
4701 DEBUG2(ql4_printk(KERN_INFO, ha,
4702 "%s: No boot target configured.\n", __func__));
4703 return ret;
4704 }
4705
4706 if (ql4xdisablesysfsboot)
4707 return QLA_SUCCESS;
4708
4709 if (ddb_index[0] == 0xffff)
4710 goto sec_target;
4711
4712 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4713 ddb_index[0]);
4714 if (rval != QLA_SUCCESS) {
4715 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4716 "configured\n", __func__));
4717 } else
4718 ret = QLA_SUCCESS;
4719
4720 sec_target:
4721 if (ddb_index[1] == 0xffff)
4722 goto exit_get_boot_info;
4723
4724 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4725 ddb_index[1]);
4726 if (rval != QLA_SUCCESS) {
4727 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4728 " configured\n", __func__));
4729 } else
4730 ret = QLA_SUCCESS;
4731
4732 exit_get_boot_info:
4733 return ret;
4734 }
4735
4736 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4737 {
4738 struct iscsi_boot_kobj *boot_kobj;
4739
4740 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4741 return QLA_ERROR;
4742
4743 if (ql4xdisablesysfsboot) {
4744 ql4_printk(KERN_INFO, ha,
4745 "%s: syfsboot disabled - driver will trigger login "
4746 "and publish session for discovery .\n", __func__);
4747 return QLA_SUCCESS;
4748 }
4749
4750
4751 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4752 if (!ha->boot_kset)
4753 goto kset_free;
4754
4755 if (!scsi_host_get(ha->host))
4756 goto kset_free;
4757 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4758 qla4xxx_show_boot_tgt_pri_info,
4759 qla4xxx_tgt_get_attr_visibility,
4760 qla4xxx_boot_release);
4761 if (!boot_kobj)
4762 goto put_host;
4763
4764 if (!scsi_host_get(ha->host))
4765 goto kset_free;
4766 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4767 qla4xxx_show_boot_tgt_sec_info,
4768 qla4xxx_tgt_get_attr_visibility,
4769 qla4xxx_boot_release);
4770 if (!boot_kobj)
4771 goto put_host;
4772
4773 if (!scsi_host_get(ha->host))
4774 goto kset_free;
4775 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4776 qla4xxx_show_boot_ini_info,
4777 qla4xxx_ini_get_attr_visibility,
4778 qla4xxx_boot_release);
4779 if (!boot_kobj)
4780 goto put_host;
4781
4782 if (!scsi_host_get(ha->host))
4783 goto kset_free;
4784 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4785 qla4xxx_show_boot_eth_info,
4786 qla4xxx_eth_get_attr_visibility,
4787 qla4xxx_boot_release);
4788 if (!boot_kobj)
4789 goto put_host;
4790
4791 return QLA_SUCCESS;
4792
4793 put_host:
4794 scsi_host_put(ha->host);
4795 kset_free:
4796 iscsi_boot_destroy_kset(ha->boot_kset);
4797 return -ENOMEM;
4798 }
4799
4800
4801 /**
4802 * qla4xxx_create_chap_list - Create CHAP list from FLASH
4803 * @ha: pointer to adapter structure
4804 *
4805 * Read flash and build a list of CHAP entries. During login, when a CHAP
4806 * entry is received it is checked against this list. If the entry exists,
4807 * its index is set in the DDB. If the entry does not exist, a new entry is
4808 * added to the CHAP table in FLASH and the index obtained is used in the
4809 * DDB.
4810 **/
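/*
 * Flash layout note (summary of the code below): ISP-4xxx keeps the CHAP
 * table at the fixed FLASH_CHAP_OFFSET, whereas ISP-8xxx shares a single
 * FLT CHAP region between both ports, so each port reads half of the
 * region and port 1 starts at an offset of chap_size.
 */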
4811 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4812 {
4813 int rval = 0;
4814 uint8_t *chap_flash_data = NULL;
4815 uint32_t offset;
4816 dma_addr_t chap_dma;
4817 uint32_t chap_size = 0;
4818
4819 if (is_qla40XX(ha))
4820 chap_size = MAX_CHAP_ENTRIES_40XX *
4821 sizeof(struct ql4_chap_table);
4822 else /* Single region contains CHAP info for both
4823 * ports which is divided into half for each port.
4824 */
4825 chap_size = ha->hw.flt_chap_size / 2;
4826
4827 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4828 &chap_dma, GFP_KERNEL);
4829 if (!chap_flash_data) {
4830 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4831 return;
4832 }
4833 if (is_qla40XX(ha))
4834 offset = FLASH_CHAP_OFFSET;
4835 else {
4836 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4837 if (ha->port_num == 1)
4838 offset += chap_size;
4839 }
4840
4841 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4842 if (rval != QLA_SUCCESS)
4843 goto exit_chap_list;
4844
4845 if (ha->chap_list == NULL)
4846 ha->chap_list = vmalloc(chap_size);
4847 if (ha->chap_list == NULL) {
4848 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4849 goto exit_chap_list;
4850 }
4851
4852 memcpy(ha->chap_list, chap_flash_data, chap_size);
4853
4854 exit_chap_list:
4855 dma_free_coherent(&ha->pdev->dev, chap_size,
4856 chap_flash_data, chap_dma);
4857 }
4858
4859 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4860 struct ql4_tuple_ddb *tddb)
4861 {
4862 struct scsi_qla_host *ha;
4863 struct iscsi_cls_session *cls_sess;
4864 struct iscsi_cls_conn *cls_conn;
4865 struct iscsi_session *sess;
4866 struct iscsi_conn *conn;
4867
4868 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4869 ha = ddb_entry->ha;
4870 cls_sess = ddb_entry->sess;
4871 sess = cls_sess->dd_data;
4872 cls_conn = ddb_entry->conn;
4873 conn = cls_conn->dd_data;
4874
4875 tddb->tpgt = sess->tpgt;
4876 tddb->port = conn->persistent_port;
4877 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4878 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4879 }
4880
4881 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4882 struct ql4_tuple_ddb *tddb,
4883 uint8_t *flash_isid)
4884 {
4885 uint16_t options = 0;
4886
4887 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4888 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4889 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4890
4891 options = le16_to_cpu(fw_ddb_entry->options);
4892 if (options & DDB_OPT_IPV6_DEVICE)
4893 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4894 else
4895 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4896
4897 tddb->port = le16_to_cpu(fw_ddb_entry->port);
4898
4899 if (flash_isid == NULL)
4900 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4901 sizeof(tddb->isid));
4902 else
4903 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4904 }
4905
4906 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4907 struct ql4_tuple_ddb *old_tddb,
4908 struct ql4_tuple_ddb *new_tddb,
4909 uint8_t is_isid_compare)
4910 {
4911 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4912 return QLA_ERROR;
4913
4914 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4915 return QLA_ERROR;
4916
4917 if (old_tddb->port != new_tddb->port)
4918 return QLA_ERROR;
4919
4920 /* For multi-session targets, the driver generates the ISID, so do not
4921 * compare ISIDs in the reset path since that would compare a
4922 * driver-generated ISID with a firmware-generated one. This could
4923 * lead to duplicate DDBs being added to the list because the
4924 * driver-generated ISID would not match the firmware-generated ISID.
4925 */
4926 if (is_isid_compare) {
4927 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4928 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4929 __func__, old_tddb->isid[5], old_tddb->isid[4],
4930 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4931 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4932 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4933 new_tddb->isid[0]));
4934
4935 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4936 sizeof(old_tddb->isid)))
4937 return QLA_ERROR;
4938 }
4939
4940 DEBUG2(ql4_printk(KERN_INFO, ha,
4941 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4942 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4943 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4944 new_tddb->ip_addr, new_tddb->iscsi_name));
4945
4946 return QLA_SUCCESS;
4947 }
4948
4949 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4950 struct dev_db_entry *fw_ddb_entry,
4951 uint32_t *index)
4952 {
4953 struct ddb_entry *ddb_entry;
4954 struct ql4_tuple_ddb *fw_tddb = NULL;
4955 struct ql4_tuple_ddb *tmp_tddb = NULL;
4956 int idx;
4957 int ret = QLA_ERROR;
4958
4959 fw_tddb = vzalloc(sizeof(*fw_tddb));
4960 if (!fw_tddb) {
4961 DEBUG2(ql4_printk(KERN_WARNING, ha,
4962 "Memory Allocation failed.\n"));
4963 ret = QLA_SUCCESS;
4964 goto exit_check;
4965 }
4966
4967 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4968 if (!tmp_tddb) {
4969 DEBUG2(ql4_printk(KERN_WARNING, ha,
4970 "Memory Allocation failed.\n"));
4971 ret = QLA_SUCCESS;
4972 goto exit_check;
4973 }
4974
4975 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4976
4977 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4978 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4979 if (ddb_entry == NULL)
4980 continue;
4981
4982 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4983 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4984 ret = QLA_SUCCESS; /* found */
4985 if (index != NULL)
4986 *index = idx;
4987 goto exit_check;
4988 }
4989 }
4990
4991 exit_check:
4992 if (fw_tddb)
4993 vfree(fw_tddb);
4994 if (tmp_tddb)
4995 vfree(tmp_tddb);
4996 return ret;
4997 }
4998
4999 /**
5000 * qla4xxx_check_existing_isid - check if a target with the same isid
5001 * exists in the target list
5002 * @list_nt: list of targets
5003 * @isid: isid to check
5004 *
5005 * This routine returns QLA_SUCCESS if a target with the same isid exists.
5006 **/
5007 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
5008 {
5009 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
5010 struct dev_db_entry *fw_ddb_entry;
5011
5012 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5013 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
5014
5015 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
5016 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
5017 return QLA_SUCCESS;
5018 }
5019 }
5020 return QLA_ERROR;
5021 }
5022
5023 /**
5024 * qla4xxx_update_isid - compare ddbs and update the isid
5025 * @ha: Pointer to host adapter structure.
5026 * @list_nt: list of NT targets
5027 * @fw_ddb_entry: firmware ddb entry
5028 *
5029 * This routine updates the isid if ddbs have the same iqn, the same isid
5030 * and a different IP address.
5031 * Returns QLA_SUCCESS if the isid is updated.
5032 **/
5033 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
5034 struct list_head *list_nt,
5035 struct dev_db_entry *fw_ddb_entry)
5036 {
5037 uint8_t base_value, i;
5038
5039 base_value = fw_ddb_entry->isid[1] & 0x1f;
5040 for (i = 0; i < 8; i++) {
5041 fw_ddb_entry->isid[1] = (base_value | (i << 5));
5042 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5043 break;
5044 }
5045
5046 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5047 return QLA_ERROR;
5048
5049 return QLA_SUCCESS;
5050 }
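/*
 * Sketch of the isid update above (for readability): the driver keeps the
 * low five bits of isid[1] fixed and steps the top three bits through the
 * eight possible values until qla4xxx_check_existing_isid() no longer
 * finds a match, i.e.
 *
 *	isid[1] = (isid[1] & 0x1f) | (i << 5);	for i = 0..7
 */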
5051
5052 /**
5053 * qla4xxx_should_update_isid - check if the isid needs to be updated
5054 * @ha: Pointer to host adapter structure.
5055 * @old_tddb: ddb tuple
5056 * @new_tddb: ddb tuple
5057 *
5058 * Returns QLA_SUCCESS for a different IP address or port but the same
5059 * iqn and the same isid.
5060 **/
5061 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
5062 struct ql4_tuple_ddb *old_tddb,
5063 struct ql4_tuple_ddb *new_tddb)
5064 {
5065 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
5066 /* Same ip */
5067 if (old_tddb->port == new_tddb->port)
5068 return QLA_ERROR;
5069 }
5070
5071 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5072 /* different iqn */
5073 return QLA_ERROR;
5074
5075 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5076 sizeof(old_tddb->isid)))
5077 /* different isid */
5078 return QLA_ERROR;
5079
5080 return QLA_SUCCESS;
5081 }
5082
5083 /**
5084 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
5085 * @ha: Pointer to host adapter structure.
5086 * @list_nt: list of NT targets.
5087 * @fw_ddb_entry: firmware ddb entry.
5088 *
5089 * This routine checks if fw_ddb_entry already exists in list_nt to avoid
5090 * adding a duplicate ddb to list_nt.
5091 * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
5092 * Note: This function also updates the isid of the DDB if required.
5093 **/
5094
5095 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
5096 struct list_head *list_nt,
5097 struct dev_db_entry *fw_ddb_entry)
5098 {
5099 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
5100 struct ql4_tuple_ddb *fw_tddb = NULL;
5101 struct ql4_tuple_ddb *tmp_tddb = NULL;
5102 int rval, ret = QLA_ERROR;
5103
5104 fw_tddb = vzalloc(sizeof(*fw_tddb));
5105 if (!fw_tddb) {
5106 DEBUG2(ql4_printk(KERN_WARNING, ha,
5107 "Memory Allocation failed.\n"));
5108 ret = QLA_SUCCESS;
5109 goto exit_check;
5110 }
5111
5112 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5113 if (!tmp_tddb) {
5114 DEBUG2(ql4_printk(KERN_WARNING, ha,
5115 "Memory Allocation failed.\n"));
5116 ret = QLA_SUCCESS;
5117 goto exit_check;
5118 }
5119
5120 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5121
5122 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5123 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
5124 nt_ddb_idx->flash_isid);
5125 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
5126 /* found duplicate ddb */
5127 if (ret == QLA_SUCCESS)
5128 goto exit_check;
5129 }
5130
5131 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5132 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
5133
5134 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
5135 if (ret == QLA_SUCCESS) {
5136 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
5137 if (rval == QLA_SUCCESS)
5138 ret = QLA_ERROR;
5139 else
5140 ret = QLA_SUCCESS;
5141
5142 goto exit_check;
5143 }
5144 }
5145
5146 exit_check:
5147 if (fw_tddb)
5148 vfree(fw_tddb);
5149 if (tmp_tddb)
5150 vfree(tmp_tddb);
5151 return ret;
5152 }
5153
5154 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
5155 {
5156 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5157
5158 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5159 list_del_init(&ddb_idx->list);
5160 vfree(ddb_idx);
5161 }
5162 }
5163
5164 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
5165 struct dev_db_entry *fw_ddb_entry)
5166 {
5167 struct iscsi_endpoint *ep;
5168 struct sockaddr_in *addr;
5169 struct sockaddr_in6 *addr6;
5170 struct sockaddr *t_addr;
5171 struct sockaddr_storage *dst_addr;
5172 char *ip;
5173
5174 /* TODO: need to destroy the iscsi_endpoint on unload */
5175 dst_addr = vmalloc(sizeof(*dst_addr));
5176 if (!dst_addr)
5177 return NULL;
5178
5179 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
5180 t_addr = (struct sockaddr *)dst_addr;
5181 t_addr->sa_family = AF_INET6;
5182 addr6 = (struct sockaddr_in6 *)dst_addr;
5183 ip = (char *)&addr6->sin6_addr;
5184 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
5185 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
5186
5187 } else {
5188 t_addr = (struct sockaddr *)dst_addr;
5189 t_addr->sa_family = AF_INET;
5190 addr = (struct sockaddr_in *)dst_addr;
5191 ip = (char *)&addr->sin_addr;
5192 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
5193 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
5194 }
5195
5196 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
5197 vfree(dst_addr);
5198 return ep;
5199 }
5200
5201 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
5202 {
5203 if (ql4xdisablesysfsboot)
5204 return QLA_SUCCESS;
5205 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
5206 return QLA_ERROR;
5207 return QLA_SUCCESS;
5208 }
5209
5210 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
5211 struct ddb_entry *ddb_entry,
5212 uint16_t idx)
5213 {
5214 uint16_t def_timeout;
5215
5216 ddb_entry->ddb_type = FLASH_DDB;
5217 ddb_entry->fw_ddb_index = INVALID_ENTRY;
5218 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
5219 ddb_entry->ha = ha;
5220 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
5221 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
5222 ddb_entry->chap_tbl_idx = INVALID_ENTRY;
5223
5224 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
5225 atomic_set(&ddb_entry->relogin_timer, 0);
5226 atomic_set(&ddb_entry->relogin_retry_count, 0);
5227 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
5228 ddb_entry->default_relogin_timeout =
5229 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
5230 def_timeout : LOGIN_TOV;
5231 ddb_entry->default_time2wait =
5232 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
5233
5234 if (ql4xdisablesysfsboot &&
5235 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
5236 set_bit(DF_BOOT_TGT, &ddb_entry->flags);
5237 }
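/*
 * Note on the defaults set above: the per-DDB relogin timeout from flash
 * is only honored when it falls in the (LOGIN_TOV, LOGIN_TOV * 10) range;
 * otherwise the driver falls back to LOGIN_TOV.
 */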
5238
5239 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
5240 {
5241 uint32_t idx = 0;
5242 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
5243 uint32_t sts[MBOX_REG_COUNT];
5244 uint32_t ip_state;
5245 unsigned long wtime;
5246 int ret;
5247
5248 wtime = jiffies + (HZ * IP_CONFIG_TOV);
5249 do {
5250 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
5251 if (ip_idx[idx] == -1)
5252 continue;
5253
5254 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
5255
5256 if (ret == QLA_ERROR) {
5257 ip_idx[idx] = -1;
5258 continue;
5259 }
5260
5261 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
5262
5263 DEBUG2(ql4_printk(KERN_INFO, ha,
5264 "Waiting for IP state for idx = %d, state = 0x%x\n",
5265 ip_idx[idx], ip_state));
5266 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
5267 ip_state == IP_ADDRSTATE_INVALID ||
5268 ip_state == IP_ADDRSTATE_PREFERRED ||
5269 ip_state == IP_ADDRSTATE_DEPRICATED ||
5270 ip_state == IP_ADDRSTATE_DISABLING)
5271 ip_idx[idx] = -1;
5272 }
5273
5274 /* Break if all IP states checked */
5275 if ((ip_idx[0] == -1) &&
5276 (ip_idx[1] == -1) &&
5277 (ip_idx[2] == -1) &&
5278 (ip_idx[3] == -1))
5279 break;
5280 schedule_timeout_uninterruptible(HZ);
5281 } while (time_after(wtime, jiffies));
5282 }
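/*
 * The wait loop above polls the state of up to four IP interfaces roughly
 * once per second for at most IP_CONFIG_TOV seconds, dropping an interface
 * from the poll set as soon as it reaches a settled (or unusable) address
 * state.
 */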
5283
5284 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
5285 struct dev_db_entry *flash_ddb_entry)
5286 {
5287 uint16_t options = 0;
5288 size_t ip_len = IP_ADDR_LEN;
5289
5290 options = le16_to_cpu(fw_ddb_entry->options);
5291 if (options & DDB_OPT_IPV6_DEVICE)
5292 ip_len = IPv6_ADDR_LEN;
5293
5294 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
5295 return QLA_ERROR;
5296
5297 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
5298 sizeof(fw_ddb_entry->isid)))
5299 return QLA_ERROR;
5300
5301 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
5302 sizeof(fw_ddb_entry->port)))
5303 return QLA_ERROR;
5304
5305 return QLA_SUCCESS;
5306 }
5307
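/**
 * qla4xxx_find_flash_st_idx - locate the flash DDB index for a firmware ST
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: sendtarget entry read from firmware
 * @fw_idx: firmware DDB index of the sendtarget
 * @flash_index: output, flash DDB index that matches @fw_ddb_entry
 *
 * First tries the same index in flash as in firmware; if that entry does not
 * match, scans the whole flash DDB table for a matching entry.
 **/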
5308 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
5309 struct dev_db_entry *fw_ddb_entry,
5310 uint32_t fw_idx, uint32_t *flash_index)
5311 {
5312 struct dev_db_entry *flash_ddb_entry;
5313 dma_addr_t flash_ddb_entry_dma;
5314 uint32_t idx = 0;
5315 int max_ddbs;
5316 int ret = QLA_ERROR, status;
5317
5318 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5319 MAX_DEV_DB_ENTRIES;
5320
5321 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5322 &flash_ddb_entry_dma);
5323 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
5324 ql4_printk(KERN_ERR, ha, "Out of memory\n");
5325 goto exit_find_st_idx;
5326 }
5327
5328 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
5329 flash_ddb_entry_dma, fw_idx);
5330 if (status == QLA_SUCCESS) {
5331 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
5332 if (status == QLA_SUCCESS) {
5333 *flash_index = fw_idx;
5334 ret = QLA_SUCCESS;
5335 goto exit_find_st_idx;
5336 }
5337 }
5338
5339 for (idx = 0; idx < max_ddbs; idx++) {
5340 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
5341 flash_ddb_entry_dma, idx);
5342 if (status == QLA_ERROR)
5343 continue;
5344
5345 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
5346 if (status == QLA_SUCCESS) {
5347 *flash_index = idx;
5348 ret = QLA_SUCCESS;
5349 goto exit_find_st_idx;
5350 }
5351 }
5352
5353 if (idx == max_ddbs)
5354 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
5355 fw_idx);
5356
5357 exit_find_st_idx:
5358 if (flash_ddb_entry)
5359 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
5360 flash_ddb_entry_dma);
5361
5362 return ret;
5363 }
5364
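/**
 * qla4xxx_build_st_list - build the list of sendtarget (ST) entries
 * @ha: pointer to adapter structure
 * @list_st: list to be populated with one qla_ddb_index node per ST
 *
 * Walks the firmware DDB table, skips unassigned entries, and for every entry
 * without an iscsi_name (i.e. a sendtarget) records its firmware index and
 * the corresponding flash index, falling back to the firmware index when no
 * matching flash entry is found.
 **/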
5365 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
5366 struct list_head *list_st)
5367 {
5368 struct qla_ddb_index *st_ddb_idx;
5369 int max_ddbs;
5370 int fw_idx_size;
5371 struct dev_db_entry *fw_ddb_entry;
5372 dma_addr_t fw_ddb_dma;
5373 int ret;
5374 uint32_t idx = 0, next_idx = 0;
5375 uint32_t state = 0, conn_err = 0;
5376 uint32_t flash_index = -1;
5377 uint16_t conn_id = 0;
5378
5379 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5380 &fw_ddb_dma);
5381 if (fw_ddb_entry == NULL) {
5382 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5383 goto exit_st_list;
5384 }
5385
5386 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5387 MAX_DEV_DB_ENTRIES;
5388 fw_idx_size = sizeof(struct qla_ddb_index);
5389
5390 for (idx = 0; idx < max_ddbs; idx = next_idx) {
5391 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5392 NULL, &next_idx, &state,
5393 &conn_err, NULL, &conn_id);
5394 if (ret == QLA_ERROR)
5395 break;
5396
5397 /* Ignore DDB if invalid state (unassigned) */
5398 if (state == DDB_DS_UNASSIGNED)
5399 goto continue_next_st;
5400
5401 /* Check if ST; if so, add it to list_st */
5402 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
5403 goto continue_next_st;
5404
5405 st_ddb_idx = vzalloc(fw_idx_size);
5406 if (!st_ddb_idx)
5407 break;
5408
5409 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
5410 &flash_index);
5411 if (ret == QLA_ERROR) {
5412 ql4_printk(KERN_ERR, ha,
5413 "No flash entry for ST at idx [%d]\n", idx);
5414 st_ddb_idx->flash_ddb_idx = idx;
5415 } else {
5416 ql4_printk(KERN_INFO, ha,
5417 "ST at idx [%d] is stored at flash [%d]\n",
5418 idx, flash_index);
5419 st_ddb_idx->flash_ddb_idx = flash_index;
5420 }
5421
5422 st_ddb_idx->fw_ddb_idx = idx;
5423
5424 list_add_tail(&st_ddb_idx->list, list_st);
5425 continue_next_st:
5426 if (next_idx == 0)
5427 break;
5428 }
5429
5430 exit_st_list:
5431 if (fw_ddb_entry)
5432 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5433 }
5434
5435 /**
5436 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
5437 * @ha: pointer to adapter structure
5438 * @list_ddb: list from which failed ddbs are to be removed
5439 *
5440 * Iterate over the list of DDBs and remove those that are either in the
5441 * "no connection active" state or in the failed state.
5442 **/
5443 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
5444 struct list_head *list_ddb)
5445 {
5446 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5447 uint32_t next_idx = 0;
5448 uint32_t state = 0, conn_err = 0;
5449 int ret;
5450
5451 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5452 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
5453 NULL, 0, NULL, &next_idx, &state,
5454 &conn_err, NULL, NULL);
5455 if (ret == QLA_ERROR)
5456 continue;
5457
5458 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5459 state == DDB_DS_SESSION_FAILED) {
5460 list_del_init(&ddb_idx->list);
5461 vfree(ddb_idx);
5462 }
5463 }
5464 }
5465
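/**
 * qla4xxx_update_sess_disc_idx - set the discovery parent for a session
 * @ha: pointer to adapter structure
 * @ddb_entry: driver ddb entry for the session
 * @fw_ddb_entry: firmware DDB entry holding the ddb_link
 *
 * Copies the firmware ddb_link into the iscsi session's discovery_parent_idx
 * when it is a valid DDB index, otherwise marks it as DDB_NO_LINK.
 **/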
5466 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
5467 struct ddb_entry *ddb_entry,
5468 struct dev_db_entry *fw_ddb_entry)
5469 {
5470 struct iscsi_cls_session *cls_sess;
5471 struct iscsi_session *sess;
5472 uint32_t max_ddbs = 0;
5473 uint16_t ddb_link = -1;
5474
5475 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5476 MAX_DEV_DB_ENTRIES;
5477
5478 cls_sess = ddb_entry->sess;
5479 sess = cls_sess->dd_data;
5480
5481 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5482 if (ddb_link < max_ddbs)
5483 sess->discovery_parent_idx = ddb_link;
5484 else
5485 sess->discovery_parent_idx = DDB_NO_LINK;
5486 }
5487
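/**
 * qla4xxx_sess_conn_setup - create session and connection for a flash target
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: firmware DDB entry describing the target
 * @is_reset: INIT_ADAPTER or RESET_ADAPTER path indicator
 * @idx: flash DDB index of the target
 *
 * Allocates the iscsi class session and connection, initializes the embedded
 * ddb_entry as a flash DDB, attaches an endpoint for sysfs, copies the
 * session/connection parameters from the firmware DDB and, on the reset path,
 * blocks the session and schedules an immediate relogin.
 **/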
5488 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
5489 struct dev_db_entry *fw_ddb_entry,
5490 int is_reset, uint16_t idx)
5491 {
5492 struct iscsi_cls_session *cls_sess;
5493 struct iscsi_session *sess;
5494 struct iscsi_cls_conn *cls_conn;
5495 struct iscsi_endpoint *ep;
5496 uint16_t cmds_max = 32;
5497 uint16_t conn_id = 0;
5498 uint32_t initial_cmdsn = 0;
5499 int ret = QLA_SUCCESS;
5500
5501 struct ddb_entry *ddb_entry = NULL;
5502
5503 /* Create the session object with INVALID_ENTRY;
5504 * the target_id gets set when we issue the login.
5505 */
5506 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
5507 cmds_max, sizeof(struct ddb_entry),
5508 sizeof(struct ql4_task_data),
5509 initial_cmdsn, INVALID_ENTRY);
5510 if (!cls_sess) {
5511 ret = QLA_ERROR;
5512 goto exit_setup;
5513 }
5514
5515 /* Session setup above took a reference on the driver module;
5516 * call module_put() to drop it so that driver unload stays
5517 * seamless (the matching try_module_get() is done at teardown).
5518 **/
5519 module_put(qla4xxx_iscsi_transport.owner);
5520 sess = cls_sess->dd_data;
5521 ddb_entry = sess->dd_data;
5522 ddb_entry->sess = cls_sess;
5523
5524 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
5525 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
5526 sizeof(struct dev_db_entry));
5527
5528 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
5529
5530 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
5531
5532 if (!cls_conn) {
5533 ret = QLA_ERROR;
5534 goto exit_setup;
5535 }
5536
5537 ddb_entry->conn = cls_conn;
5538
5539 /* Setup ep, for displaying attributes in sysfs */
5540 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
5541 if (ep) {
5542 ep->conn = cls_conn;
5543 cls_conn->ep = ep;
5544 } else {
5545 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
5546 ret = QLA_ERROR;
5547 goto exit_setup;
5548 }
5549
5550 /* Update sess/conn params */
5551 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
5552 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
5553
5554 if (is_reset == RESET_ADAPTER) {
5555 iscsi_block_session(cls_sess);
5556 /* Use the relogin path to discover new devices
5557 * by short-circuiting the logic of setting the relogin
5558 * timer - instead set the flags to initiate the login
5559 * right away.
5560 */
5561 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
5562 set_bit(DF_RELOGIN, &ddb_entry->flags);
5563 }
5564
5565 exit_setup:
5566 return ret;
5567 }
5568
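/**
 * qla4xxx_update_fw_ddb_link - remap an NT entry's ddb_link to its flash index
 * @ha: pointer to adapter structure
 * @list_ddb: list of sendtarget entries built by qla4xxx_build_st_list()
 * @fw_ddb_entry: firmware DDB entry whose ddb_link is updated in place
 *
 * If the entry's ddb_link points at one of the sendtargets in @list_ddb, the
 * link is rewritten to that sendtarget's flash DDB index.
 **/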
5569 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
5570 struct list_head *list_ddb,
5571 struct dev_db_entry *fw_ddb_entry)
5572 {
5573 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5574 uint16_t ddb_link;
5575
5576 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5577
5578 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5579 if (ddb_idx->fw_ddb_idx == ddb_link) {
5580 DEBUG2(ql4_printk(KERN_INFO, ha,
5581 "Updating NT parent idx from [%d] to [%d]\n",
5582 ddb_link, ddb_idx->flash_ddb_idx));
5583 fw_ddb_entry->ddb_link =
5584 cpu_to_le16(ddb_idx->flash_ddb_idx);
5585 return;
5586 }
5587 }
5588 }
5589
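/**
 * qla4xxx_build_nt_list - build normal target (NT) list and set up sessions
 * @ha: pointer to adapter structure
 * @list_nt: list to be populated with NT entries (init path only)
 * @list_st: sendtarget list used to fix up discovery parent links
 * @is_reset: INIT_ADAPTER or RESET_ADAPTER path indicator
 *
 * Walks the firmware DDB table, skips sendtargets and (when boot targets are
 * exported to sysfs) the boot target entries, remaps the discovery parent
 * link to its flash index, filters out duplicate or already existing
 * sessions, and finally sets up a session/connection for each remaining
 * target.
 **/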
5590 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
5591 struct list_head *list_nt,
5592 struct list_head *list_st,
5593 int is_reset)
5594 {
5595 struct dev_db_entry *fw_ddb_entry;
5596 struct ddb_entry *ddb_entry = NULL;
5597 dma_addr_t fw_ddb_dma;
5598 int max_ddbs;
5599 int fw_idx_size;
5600 int ret;
5601 uint32_t idx = 0, next_idx = 0;
5602 uint32_t state = 0, conn_err = 0;
5603 uint32_t ddb_idx = -1;
5604 uint16_t conn_id = 0;
5605 uint16_t ddb_link = -1;
5606 struct qla_ddb_index *nt_ddb_idx;
5607
5608 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5609 &fw_ddb_dma);
5610 if (fw_ddb_entry == NULL) {
5611 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5612 goto exit_nt_list;
5613 }
5614 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5615 MAX_DEV_DB_ENTRIES;
5616 fw_idx_size = sizeof(struct qla_ddb_index);
5617
5618 for (idx = 0; idx < max_ddbs; idx = next_idx) {
5619 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5620 NULL, &next_idx, &state,
5621 &conn_err, NULL, &conn_id);
5622 if (ret == QLA_ERROR)
5623 break;
5624
5625 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
5626 goto continue_next_nt;
5627
5628 /* Check if NT; if so, add it to the list */
5629 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
5630 goto continue_next_nt;
5631
5632 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5633 if (ddb_link < max_ddbs)
5634 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
5635
5636 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
5637 state == DDB_DS_SESSION_FAILED) &&
5638 (is_reset == INIT_ADAPTER))
5639 goto continue_next_nt;
5640
5641 DEBUG2(ql4_printk(KERN_INFO, ha,
5642 "Adding DDB to session = 0x%x\n", idx));
5643
5644 if (is_reset == INIT_ADAPTER) {
5645 nt_ddb_idx = vmalloc(fw_idx_size);
5646 if (!nt_ddb_idx)
5647 break;
5648
5649 nt_ddb_idx->fw_ddb_idx = idx;
5650
5651 /* Copy the original isid as it may get updated in
5652 * qla4xxx_update_isid(). The original isid is needed in
5653 * qla4xxx_compare_tuple_ddb() to find duplicate
5654 * targets. */
5655 memcpy(&nt_ddb_idx->flash_isid[0],
5656 &fw_ddb_entry->isid[0],
5657 sizeof(nt_ddb_idx->flash_isid));
5658
5659 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
5660 fw_ddb_entry);
5661 if (ret == QLA_SUCCESS) {
5662 /* free nt_ddb_idx and do not add to list_nt */
5663 vfree(nt_ddb_idx);
5664 goto continue_next_nt;
5665 }
5666
5667 /* Copy the whole fw ddb entry (with the possibly updated isid) */
5668 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
5669 sizeof(struct dev_db_entry));
5670
5671 list_add_tail(&nt_ddb_idx->list, list_nt);
5672 } else if (is_reset == RESET_ADAPTER) {
5673 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
5674 &ddb_idx);
5675 if (ret == QLA_SUCCESS) {
5676 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
5677 ddb_idx);
5678 if (ddb_entry != NULL)
5679 qla4xxx_update_sess_disc_idx(ha,
5680 ddb_entry,
5681 fw_ddb_entry);
5682 goto continue_next_nt;
5683 }
5684 }
5685
5686 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
5687 if (ret == QLA_ERROR)
5688 goto exit_nt_list;
5689
5690 continue_next_nt:
5691 if (next_idx == 0)
5692 break;
5693 }
5694
5695 exit_nt_list:
5696 if (fw_ddb_entry)
5697 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5698 }
5699
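/**
 * qla4xxx_build_new_nt_list - set up sessions for targets found by discovery
 * @ha: pointer to adapter structure
 * @list_nt: list to be populated with the newly discovered NT entries
 * @target_id: flash index of the sendtarget used as discovery parent
 *
 * Walks the firmware DDB table for normal-target entries in the
 * "no connection active" state that do not already have a session, links
 * them to @target_id and creates a session/connection for each of them.
 **/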
5700 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
5701 struct list_head *list_nt,
5702 uint16_t target_id)
5703 {
5704 struct dev_db_entry *fw_ddb_entry;
5705 dma_addr_t fw_ddb_dma;
5706 int max_ddbs;
5707 int fw_idx_size;
5708 int ret;
5709 uint32_t idx = 0, next_idx = 0;
5710 uint32_t state = 0, conn_err = 0;
5711 uint16_t conn_id = 0;
5712 struct qla_ddb_index *nt_ddb_idx;
5713
5714 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5715 &fw_ddb_dma);
5716 if (fw_ddb_entry == NULL) {
5717 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5718 goto exit_new_nt_list;
5719 }
5720 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5721 MAX_DEV_DB_ENTRIES;
5722 fw_idx_size = sizeof(struct qla_ddb_index);
5723
5724 for (idx = 0; idx < max_ddbs; idx = next_idx) {
5725 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5726 NULL, &next_idx, &state,
5727 &conn_err, NULL, &conn_id);
5728 if (ret == QLA_ERROR)
5729 break;
5730
5731 /* Check if NT, then add it to list */
5732 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
5733 goto continue_next_new_nt;
5734
5735 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
5736 goto continue_next_new_nt;
5737
5738 DEBUG2(ql4_printk(KERN_INFO, ha,
5739 "Adding DDB to session = 0x%x\n", idx));
5740
5741 nt_ddb_idx = vmalloc(fw_idx_size);
5742 if (!nt_ddb_idx)
5743 break;
5744
5745 nt_ddb_idx->fw_ddb_idx = idx;
5746
5747 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
5748 if (ret == QLA_SUCCESS) {
5749 /* free nt_ddb_idx and do not add to list_nt */
5750 vfree(nt_ddb_idx);
5751 goto continue_next_new_nt;
5752 }
5753
5754 if (target_id < max_ddbs)
5755 fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
5756
5757 list_add_tail(&nt_ddb_idx->list, list_nt);
5758
5759 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5760 idx);
5761 if (ret == QLA_ERROR)
5762 goto exit_new_nt_list;
5763
5764 continue_next_new_nt:
5765 if (next_idx == 0)
5766 break;
5767 }
5768
5769 exit_new_nt_list:
5770 if (fw_ddb_entry)
5771 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5772 }
5773
5774 /**
5775 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
5776 * @dev: dev associated with the sysfs entry
5777 * @data: pointer to flashnode session object
5778 *
5779 * Returns:
5780 * 1: if flashnode entry is non-persistent
5781 * 0: if flashnode entry is persistent
5782 **/
5783 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5784 {
5785 struct iscsi_bus_flash_session *fnode_sess;
5786
5787 if (!iscsi_flashnode_bus_match(dev, NULL))
5788 return 0;
5789
5790 fnode_sess = iscsi_dev_to_flash_session(dev);
5791
5792 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
5793 }
5794
5795 /**
5796 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
5797 * @ha: pointer to host
5798 * @fw_ddb_entry: flash ddb data
5799 * @idx: target index
5800 * @user: if set then this call is made from userland else from kernel
5801 *
5802 * Returns:
5803 * On success: QLA_SUCCESS
5804 * On failure: QLA_ERROR
5805 *
5806 * This creates separate sysfs entries for the session and connection
5807 * attributes of the given fw ddb entry.
5808 * If this is invoked as a result of a userspace call, the entry is marked
5809 * as non-persistent using the flash_state field.
5810 **/
5811 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
5812 struct dev_db_entry *fw_ddb_entry,
5813 uint16_t *idx, int user)
5814 {
5815 struct iscsi_bus_flash_session *fnode_sess = NULL;
5816 struct iscsi_bus_flash_conn *fnode_conn = NULL;
5817 int rc = QLA_ERROR;
5818
5819 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
5820 &qla4xxx_iscsi_transport, 0);
5821 if (!fnode_sess) {
5822 ql4_printk(KERN_ERR, ha,
5823 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
5824 __func__, *idx, ha->host_no);
5825 goto exit_tgt_create;
5826 }
5827
5828 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
5829 &qla4xxx_iscsi_transport, 0);
5830 if (!fnode_conn) {
5831 ql4_printk(KERN_ERR, ha,
5832 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
5833 __func__, *idx, ha->host_no);
5834 goto free_sess;
5835 }
5836
5837 if (user) {
5838 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
5839 } else {
5840 fnode_sess->flash_state = DEV_DB_PERSISTENT;
5841
5842 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
5843 fnode_sess->is_boot_target = 1;
5844 else
5845 fnode_sess->is_boot_target = 0;
5846 }
5847
5848 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
5849 fw_ddb_entry);
5850
5851 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5852 __func__, fnode_sess->dev.kobj.name);
5853
5854 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5855 __func__, fnode_conn->dev.kobj.name);
5856
5857 return QLA_SUCCESS;
5858
5859 free_sess:
5860 iscsi_destroy_flashnode_sess(fnode_sess);
5861
5862 exit_tgt_create:
5863 return QLA_ERROR;
5864 }
5865
5866 /**
5867 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
5868 * @shost: pointer to host
5869 * @buf: type of ddb entry (ipv4/ipv6)
5870 * @len: length of buf
5871 *
5872 * This creates a new ddb entry in flash by finding the first free index and
5873 * storing the default ddb there, then creates a sysfs entry for it.
5874 **/
5875 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5876 int len)
5877 {
5878 struct scsi_qla_host *ha = to_qla_host(shost);
5879 struct dev_db_entry *fw_ddb_entry = NULL;
5880 dma_addr_t fw_ddb_entry_dma;
5881 struct device *dev;
5882 uint16_t idx = 0;
5883 uint16_t max_ddbs = 0;
5884 uint32_t options = 0;
5885 uint32_t rval = QLA_ERROR;
5886
5887 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
5888 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
5889 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
5890 __func__));
5891 goto exit_ddb_add;
5892 }
5893
5894 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
5895 MAX_DEV_DB_ENTRIES;
5896
5897 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5898 &fw_ddb_entry_dma, GFP_KERNEL);
5899 if (!fw_ddb_entry) {
5900 DEBUG2(ql4_printk(KERN_ERR, ha,
5901 "%s: Unable to allocate dma buffer\n",
5902 __func__));
5903 goto exit_ddb_add;
5904 }
5905
5906 dev = iscsi_find_flashnode_sess(ha->host, NULL,
5907 qla4xxx_sysfs_ddb_is_non_persistent);
5908 if (dev) {
5909 ql4_printk(KERN_ERR, ha,
5910 "%s: A non-persistent entry %s found\n",
5911 __func__, dev->kobj.name);
5912 put_device(dev);
5913 goto exit_ddb_add;
5914 }
5915
5916 /* Index 0 and 1 are reserved for boot target entries */
5917 for (idx = 2; idx < max_ddbs; idx++) {
5918 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
5919 fw_ddb_entry_dma, idx))
5920 break;
5921 }
5922
5923 if (idx == max_ddbs)
5924 goto exit_ddb_add;
5925
5926 if (!strncasecmp("ipv6", buf, 4))
5927 options |= IPV6_DEFAULT_DDB_ENTRY;
5928
5929 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5930 if (rval == QLA_ERROR)
5931 goto exit_ddb_add;
5932
5933 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
5934
5935 exit_ddb_add:
5936 if (fw_ddb_entry)
5937 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5938 fw_ddb_entry, fw_ddb_entry_dma);
5939 if (rval == QLA_SUCCESS)
5940 return idx;
5941 else
5942 return -EIO;
5943 }
5944
5945 /**
5946 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
5947 * @fnode_sess: pointer to session attrs of flash ddb entry
5948 * @fnode_conn: pointer to connection attrs of flash ddb entry
5949 *
5950 * This writes the contents of the target ddb buffer to Flash with a valid cookie
5951 * value in order to make the ddb entry persistent.
5952 **/
5953 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
5954 struct iscsi_bus_flash_conn *fnode_conn)
5955 {
5956 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5957 struct scsi_qla_host *ha = to_qla_host(shost);
5958 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
5959 struct dev_db_entry *fw_ddb_entry = NULL;
5960 dma_addr_t fw_ddb_entry_dma;
5961 uint32_t options = 0;
5962 int rval = 0;
5963
5964 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5965 &fw_ddb_entry_dma, GFP_KERNEL);
5966 if (!fw_ddb_entry) {
5967 DEBUG2(ql4_printk(KERN_ERR, ha,
5968 "%s: Unable to allocate dma buffer\n",
5969 __func__));
5970 rval = -ENOMEM;
5971 goto exit_ddb_apply;
5972 }
5973
5974 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
5975 options |= IPV6_DEFAULT_DDB_ENTRY;
5976
5977 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5978 if (rval == QLA_ERROR)
5979 goto exit_ddb_apply;
5980
5981 dev_db_start_offset += (fnode_sess->target_id *
5982 sizeof(*fw_ddb_entry));
5983
5984 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
5985 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
5986
5987 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
5988 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
5989
5990 if (rval == QLA_SUCCESS) {
5991 fnode_sess->flash_state = DEV_DB_PERSISTENT;
5992 ql4_printk(KERN_INFO, ha,
5993 "%s: flash node %u of host %lu written to flash\n",
5994 __func__, fnode_sess->target_id, ha->host_no);
5995 } else {
5996 rval = -EIO;
5997 ql4_printk(KERN_ERR, ha,
5998 "%s: Error while writing flash node %u of host %lu to flash\n",
5999 __func__, fnode_sess->target_id, ha->host_no);
6000 }
6001
6002 exit_ddb_apply:
6003 if (fw_ddb_entry)
6004 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6005 fw_ddb_entry, fw_ddb_entry_dma);
6006 return rval;
6007 }
6008
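/**
 * qla4xxx_sysfs_ddb_conn_open - open a connection on a firmware DDB index
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: DDB entry to program into the firmware
 * @idx: firmware DDB index to use
 *
 * Programs @fw_ddb_entry at @idx, issues a connection open and then polls the
 * DDB state until the sendtargets exchange completes (no connection active or
 * session failed) or the login timeout expires.
 **/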
6009 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
6010 struct dev_db_entry *fw_ddb_entry,
6011 uint16_t idx)
6012 {
6013 struct dev_db_entry *ddb_entry = NULL;
6014 dma_addr_t ddb_entry_dma;
6015 unsigned long wtime;
6016 uint32_t mbx_sts = 0;
6017 uint32_t state = 0, conn_err = 0;
6018 uint16_t tmo = 0;
6019 int ret = 0;
6020
6021 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
6022 &ddb_entry_dma, GFP_KERNEL);
6023 if (!ddb_entry) {
6024 DEBUG2(ql4_printk(KERN_ERR, ha,
6025 "%s: Unable to allocate dma buffer\n",
6026 __func__));
6027 return QLA_ERROR;
6028 }
6029
6030 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
6031
6032 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
6033 if (ret != QLA_SUCCESS) {
6034 DEBUG2(ql4_printk(KERN_ERR, ha,
6035 "%s: Unable to set ddb entry for index %d\n",
6036 __func__, idx));
6037 goto exit_ddb_conn_open;
6038 }
6039
6040 qla4xxx_conn_open(ha, idx);
6041
6042 /* To ensure that sendtargets is done, wait for at least 12 secs */
6043 tmo = ((ha->def_timeout > LOGIN_TOV) &&
6044 (ha->def_timeout < LOGIN_TOV * 10) ?
6045 ha->def_timeout : LOGIN_TOV);
6046
6047 DEBUG2(ql4_printk(KERN_INFO, ha,
6048 "Default time to wait for login to ddb %d\n", tmo));
6049
6050 wtime = jiffies + (HZ * tmo);
6051 do {
6052 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
6053 NULL, &state, &conn_err, NULL,
6054 NULL);
6055 if (ret == QLA_ERROR)
6056 continue;
6057
6058 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6059 state == DDB_DS_SESSION_FAILED)
6060 break;
6061
6062 schedule_timeout_uninterruptible(HZ / 10);
6063 } while (time_after(wtime, jiffies));
6064
6065 exit_ddb_conn_open:
6066 if (ddb_entry)
6067 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
6068 ddb_entry, ddb_entry_dma);
6069 return ret;
6070 }
6071
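/**
 * qla4xxx_ddb_login_st - perform a sendtarget discovery login
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: sendtarget DDB entry to log in with
 * @target_id: flash index of the sendtarget, used as discovery parent
 *
 * Allocates a temporary firmware DDB index, opens a discovery connection,
 * builds sessions for the newly discovered normal targets, releases the
 * firmware DDBs used during discovery and clears the temporary sendtarget
 * DDB. Only one discovery may be in progress at a time.
 **/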
6072 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
6073 struct dev_db_entry *fw_ddb_entry,
6074 uint16_t target_id)
6075 {
6076 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6077 struct list_head list_nt;
6078 uint16_t ddb_index;
6079 int ret = 0;
6080
6081 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
6082 ql4_printk(KERN_WARNING, ha,
6083 "%s: A discovery already in progress!\n", __func__);
6084 return QLA_ERROR;
6085 }
6086
6087 INIT_LIST_HEAD(&list_nt);
6088
6089 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
6090
6091 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
6092 if (ret == QLA_ERROR)
6093 goto exit_login_st_clr_bit;
6094
6095 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
6096 if (ret == QLA_ERROR)
6097 goto exit_login_st;
6098
6099 qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
6100
6101 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
6102 list_del_init(&ddb_idx->list);
6103 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
6104 vfree(ddb_idx);
6105 }
6106
6107 exit_login_st:
6108 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
6109 ql4_printk(KERN_ERR, ha,
6110 "Unable to clear DDB index = 0x%x\n", ddb_index);
6111 }
6112
6113 clear_bit(ddb_index, ha->ddb_idx_map);
6114
6115 exit_login_st_clr_bit:
6116 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
6117 return ret;
6118 }
6119
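/**
 * qla4xxx_ddb_login_nt - log in to a normal (NT) flash target
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: DDB entry of the target
 * @idx: flash index of the target
 *
 * Sets up a session/connection for the target unless a session for it
 * already exists, in which case -EPERM is returned.
 **/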
6120 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
6121 struct dev_db_entry *fw_ddb_entry,
6122 uint16_t idx)
6123 {
6124 int ret = QLA_ERROR;
6125
6126 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
6127 if (ret != QLA_SUCCESS)
6128 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
6129 idx);
6130 else
6131 ret = -EPERM;
6132
6133 return ret;
6134 }
6135
6136 /**
6137 * qla4xxx_sysfs_ddb_login - Login to the specified target
6138 * @fnode_sess: pointer to session attrs of flash ddb entry
6139 * @fnode_conn: pointer to connection attrs of flash ddb entry
6140 *
6141 * This logs in to the specified target
6142 **/
6143 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
6144 struct iscsi_bus_flash_conn *fnode_conn)
6145 {
6146 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6147 struct scsi_qla_host *ha = to_qla_host(shost);
6148 struct dev_db_entry *fw_ddb_entry = NULL;
6149 dma_addr_t fw_ddb_entry_dma;
6150 uint32_t options = 0;
6151 int ret = 0;
6152
6153 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
6154 ql4_printk(KERN_ERR, ha,
6155 "%s: Target info is not persistent\n", __func__);
6156 ret = -EIO;
6157 goto exit_ddb_login;
6158 }
6159
6160 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6161 &fw_ddb_entry_dma, GFP_KERNEL);
6162 if (!fw_ddb_entry) {
6163 DEBUG2(ql4_printk(KERN_ERR, ha,
6164 "%s: Unable to allocate dma buffer\n",
6165 __func__));
6166 ret = -ENOMEM;
6167 goto exit_ddb_login;
6168 }
6169
6170 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6171 options |= IPV6_DEFAULT_DDB_ENTRY;
6172
6173 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6174 if (ret == QLA_ERROR)
6175 goto exit_ddb_login;
6176
6177 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6178 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6179
6180 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
6181 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
6182 fnode_sess->target_id);
6183 else
6184 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
6185 fnode_sess->target_id);
6186
6187 if (ret > 0)
6188 ret = -EIO;
6189
6190 exit_ddb_login:
6191 if (fw_ddb_entry)
6192 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6193 fw_ddb_entry, fw_ddb_entry_dma);
6194 return ret;
6195 }
6196
6197 /**
6198 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
6199 * @cls_sess: pointer to session to be logged out
6200 *
6201 * This performs session log out from the specified target
6202 **/
6203 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
6204 {
6205 struct iscsi_session *sess;
6206 struct ddb_entry *ddb_entry = NULL;
6207 struct scsi_qla_host *ha;
6208 struct dev_db_entry *fw_ddb_entry = NULL;
6209 dma_addr_t fw_ddb_entry_dma;
6210 unsigned long flags;
6211 unsigned long wtime;
6212 uint32_t ddb_state;
6213 int options;
6214 int ret = 0;
6215
6216 sess = cls_sess->dd_data;
6217 ddb_entry = sess->dd_data;
6218 ha = ddb_entry->ha;
6219
6220 if (ddb_entry->ddb_type != FLASH_DDB) {
6221 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
6222 __func__);
6223 ret = -ENXIO;
6224 goto exit_ddb_logout;
6225 }
6226
6227 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
6228 ql4_printk(KERN_ERR, ha,
6229 "%s: Logout from boot target entry is not permitted.\n",
6230 __func__);
6231 ret = -EPERM;
6232 goto exit_ddb_logout;
6233 }
6234
6235 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6236 &fw_ddb_entry_dma, GFP_KERNEL);
6237 if (!fw_ddb_entry) {
6238 ql4_printk(KERN_ERR, ha,
6239 "%s: Unable to allocate dma buffer\n", __func__);
6240 ret = -ENOMEM;
6241 goto exit_ddb_logout;
6242 }
6243
6244 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
6245 goto ddb_logout_init;
6246
6247 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6248 fw_ddb_entry, fw_ddb_entry_dma,
6249 NULL, NULL, &ddb_state, NULL,
6250 NULL, NULL);
6251 if (ret == QLA_ERROR)
6252 goto ddb_logout_init;
6253
6254 if (ddb_state == DDB_DS_SESSION_ACTIVE)
6255 goto ddb_logout_init;
6256
6257 /* Wait until the next relogin is triggered via DF_RELOGIN, then
6258 * clear DF_RELOGIN to avoid triggering further relogins
6259 */
6260 wtime = jiffies + (HZ * RELOGIN_TOV);
6261 do {
6262 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
6263 goto ddb_logout_init;
6264
6265 schedule_timeout_uninterruptible(HZ);
6266 } while ((time_after(wtime, jiffies)));
6267
6268 ddb_logout_init:
6269 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6270 atomic_set(&ddb_entry->relogin_timer, 0);
6271
6272 options = LOGOUT_OPTION_CLOSE_SESSION;
6273 qla4xxx_session_logout_ddb(ha, ddb_entry, options);
6274
6275 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
6276 wtime = jiffies + (HZ * LOGOUT_TOV);
6277 do {
6278 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6279 fw_ddb_entry, fw_ddb_entry_dma,
6280 NULL, NULL, &ddb_state, NULL,
6281 NULL, NULL);
6282 if (ret == QLA_ERROR)
6283 goto ddb_logout_clr_sess;
6284
6285 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
6286 (ddb_state == DDB_DS_SESSION_FAILED))
6287 goto ddb_logout_clr_sess;
6288
6289 schedule_timeout_uninterruptible(HZ);
6290 } while ((time_after(wtime, jiffies)));
6291
6292 ddb_logout_clr_sess:
6293 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
6294 /*
6295 * We decremented the driver module reference count when the
6296 * session was set up so that driver unload stays seamless;
6297 * take the reference back before actually destroying the
6298 * session.
6299 **/
6300 try_module_get(qla4xxx_iscsi_transport.owner);
6301 iscsi_destroy_endpoint(ddb_entry->conn->ep);
6302
6303 spin_lock_irqsave(&ha->hardware_lock, flags);
6304 qla4xxx_free_ddb(ha, ddb_entry);
6305 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
6306 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6307
6308 iscsi_session_teardown(ddb_entry->sess);
6309
6310 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
6311 ret = QLA_SUCCESS;
6312
6313 exit_ddb_logout:
6314 if (fw_ddb_entry)
6315 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6316 fw_ddb_entry, fw_ddb_entry_dma);
6317 return ret;
6318 }
6319
6320 /**
6321 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
6322 * @fnode_sess: pointer to session attrs of flash ddb entry
6323 * @fnode_conn: pointer to connection attrs of flash ddb entry
6324 *
6325 * This performs log out from the specified target
6326 **/
6327 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
6328 struct iscsi_bus_flash_conn *fnode_conn)
6329 {
6330 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6331 struct scsi_qla_host *ha = to_qla_host(shost);
6332 struct ql4_tuple_ddb *flash_tddb = NULL;
6333 struct ql4_tuple_ddb *tmp_tddb = NULL;
6334 struct dev_db_entry *fw_ddb_entry = NULL;
6335 struct ddb_entry *ddb_entry = NULL;
6336 dma_addr_t fw_ddb_dma;
6337 uint32_t next_idx = 0;
6338 uint32_t state = 0, conn_err = 0;
6339 uint16_t conn_id = 0;
6340 int idx, index;
6341 int status, ret = 0;
6342
6343 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6344 &fw_ddb_dma);
6345 if (fw_ddb_entry == NULL) {
6346 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
6347 ret = -ENOMEM;
6348 goto exit_ddb_logout;
6349 }
6350
6351 flash_tddb = vzalloc(sizeof(*flash_tddb));
6352 if (!flash_tddb) {
6353 ql4_printk(KERN_WARNING, ha,
6354 "%s:Memory Allocation failed.\n", __func__);
6355 ret = -ENOMEM;
6356 goto exit_ddb_logout;
6357 }
6358
6359 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6360 if (!tmp_tddb) {
6361 ql4_printk(KERN_WARNING, ha,
6362 "%s:Memory Allocation failed.\n", __func__);
6363 ret = -ENOMEM;
6364 goto exit_ddb_logout;
6365 }
6366
6367 if (!fnode_sess->targetname) {
6368 ql4_printk(KERN_ERR, ha,
6369 "%s:Cannot logout from SendTarget entry\n",
6370 __func__);
6371 ret = -EPERM;
6372 goto exit_ddb_logout;
6373 }
6374
6375 if (fnode_sess->is_boot_target) {
6376 ql4_printk(KERN_ERR, ha,
6377 "%s: Logout from boot target entry is not permitted.\n",
6378 __func__);
6379 ret = -EPERM;
6380 goto exit_ddb_logout;
6381 }
6382
6383 strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
6384 ISCSI_NAME_SIZE);
6385
6386 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6387 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
6388 else
6389 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
6390
6391 flash_tddb->tpgt = fnode_sess->tpgt;
6392 flash_tddb->port = fnode_conn->port;
6393
6394 COPY_ISID(flash_tddb->isid, fnode_sess->isid);
6395
6396 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6397 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6398 if (ddb_entry == NULL)
6399 continue;
6400
6401 if (ddb_entry->ddb_type != FLASH_DDB)
6402 continue;
6403
6404 index = ddb_entry->sess->target_id;
6405 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
6406 fw_ddb_dma, NULL, &next_idx,
6407 &state, &conn_err, NULL,
6408 &conn_id);
6409 if (status == QLA_ERROR) {
6410 ret = -ENOMEM;
6411 break;
6412 }
6413
6414 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
6415
6416 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
6417 true);
6418 if (status == QLA_SUCCESS) {
6419 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
6420 break;
6421 }
6422 }
6423
6424 if (idx == MAX_DDB_ENTRIES)
6425 ret = -ESRCH;
6426
6427 exit_ddb_logout:
6428 if (flash_tddb)
6429 vfree(flash_tddb);
6430 if (tmp_tddb)
6431 vfree(tmp_tddb);
6432 if (fw_ddb_entry)
6433 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6434
6435 return ret;
6436 }
6437
6438 static int
6439 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6440 int param, char *buf)
6441 {
6442 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6443 struct scsi_qla_host *ha = to_qla_host(shost);
6444 struct iscsi_bus_flash_conn *fnode_conn;
6445 struct ql4_chap_table chap_tbl;
6446 struct device *dev;
6447 int parent_type;
6448 int rc = 0;
6449
6450 dev = iscsi_find_flashnode_conn(fnode_sess);
6451 if (!dev)
6452 return -EIO;
6453
6454 fnode_conn = iscsi_dev_to_flash_conn(dev);
6455
6456 switch (param) {
6457 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6458 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
6459 break;
6460 case ISCSI_FLASHNODE_PORTAL_TYPE:
6461 rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
6462 break;
6463 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6464 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
6465 break;
6466 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6467 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
6468 break;
6469 case ISCSI_FLASHNODE_ENTRY_EN:
6470 rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
6471 break;
6472 case ISCSI_FLASHNODE_HDR_DGST_EN:
6473 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
6474 break;
6475 case ISCSI_FLASHNODE_DATA_DGST_EN:
6476 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
6477 break;
6478 case ISCSI_FLASHNODE_IMM_DATA_EN:
6479 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
6480 break;
6481 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6482 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
6483 break;
6484 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6485 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
6486 break;
6487 case ISCSI_FLASHNODE_PDU_INORDER:
6488 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
6489 break;
6490 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6491 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
6492 break;
6493 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6494 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
6495 break;
6496 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6497 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
6498 break;
6499 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6500 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
6501 break;
6502 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6503 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
6504 break;
6505 case ISCSI_FLASHNODE_ERL:
6506 rc = sprintf(buf, "%u\n", fnode_sess->erl);
6507 break;
6508 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6509 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
6510 break;
6511 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6512 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
6513 break;
6514 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6515 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
6516 break;
6517 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6518 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
6519 break;
6520 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6521 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
6522 break;
6523 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6524 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
6525 break;
6526 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6527 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
6528 break;
6529 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6530 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
6531 break;
6532 case ISCSI_FLASHNODE_FIRST_BURST:
6533 rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
6534 break;
6535 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6536 rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
6537 break;
6538 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6539 rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
6540 break;
6541 case ISCSI_FLASHNODE_MAX_R2T:
6542 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
6543 break;
6544 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6545 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
6546 break;
6547 case ISCSI_FLASHNODE_ISID:
6548 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
6549 fnode_sess->isid[0], fnode_sess->isid[1],
6550 fnode_sess->isid[2], fnode_sess->isid[3],
6551 fnode_sess->isid[4], fnode_sess->isid[5]);
6552 break;
6553 case ISCSI_FLASHNODE_TSID:
6554 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
6555 break;
6556 case ISCSI_FLASHNODE_PORT:
6557 rc = sprintf(buf, "%d\n", fnode_conn->port);
6558 break;
6559 case ISCSI_FLASHNODE_MAX_BURST:
6560 rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
6561 break;
6562 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6563 rc = sprintf(buf, "%u\n",
6564 fnode_sess->default_taskmgmt_timeout);
6565 break;
6566 case ISCSI_FLASHNODE_IPADDR:
6567 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6568 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
6569 else
6570 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
6571 break;
6572 case ISCSI_FLASHNODE_ALIAS:
6573 if (fnode_sess->targetalias)
6574 rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
6575 else
6576 rc = sprintf(buf, "\n");
6577 break;
6578 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6579 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6580 rc = sprintf(buf, "%pI6\n",
6581 fnode_conn->redirect_ipaddr);
6582 else
6583 rc = sprintf(buf, "%pI4\n",
6584 fnode_conn->redirect_ipaddr);
6585 break;
6586 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6587 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
6588 break;
6589 case ISCSI_FLASHNODE_LOCAL_PORT:
6590 rc = sprintf(buf, "%u\n", fnode_conn->local_port);
6591 break;
6592 case ISCSI_FLASHNODE_IPV4_TOS:
6593 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
6594 break;
6595 case ISCSI_FLASHNODE_IPV6_TC:
6596 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6597 rc = sprintf(buf, "%u\n",
6598 fnode_conn->ipv6_traffic_class);
6599 else
6600 rc = sprintf(buf, "\n");
6601 break;
6602 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6603 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
6604 break;
6605 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6606 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6607 rc = sprintf(buf, "%pI6\n",
6608 fnode_conn->link_local_ipv6_addr);
6609 else
6610 rc = sprintf(buf, "\n");
6611 break;
6612 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6613 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
6614 break;
6615 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6616 if (fnode_sess->discovery_parent_type == DDB_ISNS)
6617 parent_type = ISCSI_DISC_PARENT_ISNS;
6618 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6619 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6620 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6621 parent_type = ISCSI_DISC_PARENT_SENDTGT;
6622 else
6623 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6624
6625 rc = sprintf(buf, "%s\n",
6626 iscsi_get_discovery_parent_name(parent_type));
6627 break;
6628 case ISCSI_FLASHNODE_NAME:
6629 if (fnode_sess->targetname)
6630 rc = sprintf(buf, "%s\n", fnode_sess->targetname);
6631 else
6632 rc = sprintf(buf, "\n");
6633 break;
6634 case ISCSI_FLASHNODE_TPGT:
6635 rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
6636 break;
6637 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6638 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
6639 break;
6640 case ISCSI_FLASHNODE_TCP_RECV_WSF:
6641 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
6642 break;
6643 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6644 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
6645 break;
6646 case ISCSI_FLASHNODE_USERNAME:
6647 if (fnode_sess->chap_auth_en) {
6648 qla4xxx_get_uni_chap_at_index(ha,
6649 chap_tbl.name,
6650 chap_tbl.secret,
6651 fnode_sess->chap_out_idx);
6652 rc = sprintf(buf, "%s\n", chap_tbl.name);
6653 } else {
6654 rc = sprintf(buf, "\n");
6655 }
6656 break;
6657 case ISCSI_FLASHNODE_PASSWORD:
6658 if (fnode_sess->chap_auth_en) {
6659 qla4xxx_get_uni_chap_at_index(ha,
6660 chap_tbl.name,
6661 chap_tbl.secret,
6662 fnode_sess->chap_out_idx);
6663 rc = sprintf(buf, "%s\n", chap_tbl.secret);
6664 } else {
6665 rc = sprintf(buf, "\n");
6666 }
6667 break;
6668 case ISCSI_FLASHNODE_STATSN:
6669 rc = sprintf(buf, "%u\n", fnode_conn->statsn);
6670 break;
6671 case ISCSI_FLASHNODE_EXP_STATSN:
6672 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
6673 break;
6674 case ISCSI_FLASHNODE_IS_BOOT_TGT:
6675 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
6676 break;
6677 default:
6678 rc = -ENOSYS;
6679 break;
6680 }
6681
6682 put_device(dev);
6683 return rc;
6684 }
6685
6686 /**
6687 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6688 * @fnode_sess: pointer to session attrs of flash ddb entry
6689 * @fnode_conn: pointer to connection attrs of flash ddb entry
6690 * @data: Parameters and their values to update
6691 * @len: length of data
6692 *
6693 * This sets the parameters of the flash ddb entry and writes them to flash.
6694 **/
6695 static int
6696 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6697 struct iscsi_bus_flash_conn *fnode_conn,
6698 void *data, int len)
6699 {
6700 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6701 struct scsi_qla_host *ha = to_qla_host(shost);
6702 struct iscsi_flashnode_param_info *fnode_param;
6703 struct ql4_chap_table chap_tbl;
6704 struct nlattr *attr;
6705 uint16_t chap_out_idx = INVALID_ENTRY;
6706 int rc = QLA_ERROR;
6707 uint32_t rem = len;
6708
6709 memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
6710 nla_for_each_attr(attr, data, len, rem) {
6711 fnode_param = nla_data(attr);
6712
6713 switch (fnode_param->param) {
6714 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6715 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6716 break;
6717 case ISCSI_FLASHNODE_PORTAL_TYPE:
6718 memcpy(fnode_sess->portal_type, fnode_param->value,
6719 strlen(fnode_sess->portal_type));
6720 break;
6721 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6722 fnode_sess->auto_snd_tgt_disable =
6723 fnode_param->value[0];
6724 break;
6725 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6726 fnode_sess->discovery_sess = fnode_param->value[0];
6727 break;
6728 case ISCSI_FLASHNODE_ENTRY_EN:
6729 fnode_sess->entry_state = fnode_param->value[0];
6730 break;
6731 case ISCSI_FLASHNODE_HDR_DGST_EN:
6732 fnode_conn->hdrdgst_en = fnode_param->value[0];
6733 break;
6734 case ISCSI_FLASHNODE_DATA_DGST_EN:
6735 fnode_conn->datadgst_en = fnode_param->value[0];
6736 break;
6737 case ISCSI_FLASHNODE_IMM_DATA_EN:
6738 fnode_sess->imm_data_en = fnode_param->value[0];
6739 break;
6740 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6741 fnode_sess->initial_r2t_en = fnode_param->value[0];
6742 break;
6743 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6744 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6745 break;
6746 case ISCSI_FLASHNODE_PDU_INORDER:
6747 fnode_sess->pdu_inorder_en = fnode_param->value[0];
6748 break;
6749 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6750 fnode_sess->chap_auth_en = fnode_param->value[0];
6751 /* Invalidate chap index if chap auth is disabled */
6752 if (!fnode_sess->chap_auth_en)
6753 fnode_sess->chap_out_idx = INVALID_ENTRY;
6754
6755 break;
6756 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6757 fnode_conn->snack_req_en = fnode_param->value[0];
6758 break;
6759 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6760 fnode_sess->discovery_logout_en = fnode_param->value[0];
6761 break;
6762 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6763 fnode_sess->bidi_chap_en = fnode_param->value[0];
6764 break;
6765 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6766 fnode_sess->discovery_auth_optional =
6767 fnode_param->value[0];
6768 break;
6769 case ISCSI_FLASHNODE_ERL:
6770 fnode_sess->erl = fnode_param->value[0];
6771 break;
6772 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6773 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
6774 break;
6775 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6776 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
6777 break;
6778 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6779 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
6780 break;
6781 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6782 fnode_conn->tcp_timer_scale = fnode_param->value[0];
6783 break;
6784 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6785 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
6786 break;
6787 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6788 fnode_conn->fragment_disable = fnode_param->value[0];
6789 break;
6790 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6791 fnode_conn->max_recv_dlength =
6792 *(unsigned *)fnode_param->value;
6793 break;
6794 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6795 fnode_conn->max_xmit_dlength =
6796 *(unsigned *)fnode_param->value;
6797 break;
6798 case ISCSI_FLASHNODE_FIRST_BURST:
6799 fnode_sess->first_burst =
6800 *(unsigned *)fnode_param->value;
6801 break;
6802 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6803 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
6804 break;
6805 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6806 fnode_sess->time2retain =
6807 *(uint16_t *)fnode_param->value;
6808 break;
6809 case ISCSI_FLASHNODE_MAX_R2T:
6810 fnode_sess->max_r2t =
6811 *(uint16_t *)fnode_param->value;
6812 break;
6813 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6814 fnode_conn->keepalive_timeout =
6815 *(uint16_t *)fnode_param->value;
6816 break;
6817 case ISCSI_FLASHNODE_ISID:
6818 memcpy(fnode_sess->isid, fnode_param->value,
6819 sizeof(fnode_sess->isid));
6820 break;
6821 case ISCSI_FLASHNODE_TSID:
6822 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
6823 break;
6824 case ISCSI_FLASHNODE_PORT:
6825 fnode_conn->port = *(uint16_t *)fnode_param->value;
6826 break;
6827 case ISCSI_FLASHNODE_MAX_BURST:
6828 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
6829 break;
6830 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6831 fnode_sess->default_taskmgmt_timeout =
6832 *(uint16_t *)fnode_param->value;
6833 break;
6834 case ISCSI_FLASHNODE_IPADDR:
6835 memcpy(fnode_conn->ipaddress, fnode_param->value,
6836 IPv6_ADDR_LEN);
6837 break;
6838 case ISCSI_FLASHNODE_ALIAS:
6839 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
6840 (char *)fnode_param->value);
6841 break;
6842 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6843 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
6844 IPv6_ADDR_LEN);
6845 break;
6846 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6847 fnode_conn->max_segment_size =
6848 *(unsigned *)fnode_param->value;
6849 break;
6850 case ISCSI_FLASHNODE_LOCAL_PORT:
6851 fnode_conn->local_port =
6852 *(uint16_t *)fnode_param->value;
6853 break;
6854 case ISCSI_FLASHNODE_IPV4_TOS:
6855 fnode_conn->ipv4_tos = fnode_param->value[0];
6856 break;
6857 case ISCSI_FLASHNODE_IPV6_TC:
6858 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
6859 break;
6860 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6861 fnode_conn->ipv6_flow_label = fnode_param->value[0];
6862 break;
6863 case ISCSI_FLASHNODE_NAME:
6864 rc = iscsi_switch_str_param(&fnode_sess->targetname,
6865 (char *)fnode_param->value);
6866 break;
6867 case ISCSI_FLASHNODE_TPGT:
6868 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
6869 break;
6870 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6871 memcpy(fnode_conn->link_local_ipv6_addr,
6872 fnode_param->value, IPv6_ADDR_LEN);
6873 break;
6874 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6875 fnode_sess->discovery_parent_idx =
6876 *(uint16_t *)fnode_param->value;
6877 break;
6878 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6879 fnode_conn->tcp_xmit_wsf =
6880 *(uint8_t *)fnode_param->value;
6881 break;
6882 case ISCSI_FLASHNODE_TCP_RECV_WSF:
6883 fnode_conn->tcp_recv_wsf =
6884 *(uint8_t *)fnode_param->value;
6885 break;
6886 case ISCSI_FLASHNODE_STATSN:
6887 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
6888 break;
6889 case ISCSI_FLASHNODE_EXP_STATSN:
6890 fnode_conn->exp_statsn =
6891 *(uint32_t *)fnode_param->value;
6892 break;
6893 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6894 chap_out_idx = *(uint16_t *)fnode_param->value;
6895 if (!qla4xxx_get_uni_chap_at_index(ha,
6896 chap_tbl.name,
6897 chap_tbl.secret,
6898 chap_out_idx)) {
6899 fnode_sess->chap_out_idx = chap_out_idx;
6900 /* Enable chap auth if chap index is valid */
6901 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
6902 }
6903 break;
6904 default:
6905 ql4_printk(KERN_ERR, ha,
6906 "%s: No such sysfs attribute\n", __func__);
6907 rc = -ENOSYS;
6908 goto exit_set_param;
6909 }
6910 }
6911
6912 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
6913
6914 exit_set_param:
6915 return rc;
6916 }
6917
6918 /**
6919 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
6920 * @fnode_sess: pointer to session attrs of flash ddb entry
6921 *
6922 * This invalidates the flash ddb entry at the given index
6923 **/
6924 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6925 {
6926 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6927 struct scsi_qla_host *ha = to_qla_host(shost);
6928 uint32_t dev_db_start_offset;
6929 uint32_t dev_db_end_offset;
6930 struct dev_db_entry *fw_ddb_entry = NULL;
6931 dma_addr_t fw_ddb_entry_dma;
6932 uint16_t *ddb_cookie = NULL;
6933 size_t ddb_size = 0;
6934 void *pddb = NULL;
6935 int target_id;
6936 int rc = 0;
6937
6938 if (fnode_sess->is_boot_target) {
6939 rc = -EPERM;
6940 DEBUG2(ql4_printk(KERN_ERR, ha,
6941 "%s: Deletion of boot target entry is not permitted.\n",
6942 __func__));
6943 goto exit_ddb_del;
6944 }
6945
6946 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
6947 goto sysfs_ddb_del;
6948
6949 if (is_qla40XX(ha)) {
6950 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6951 dev_db_end_offset = FLASH_OFFSET_DB_END;
6952 dev_db_start_offset += (fnode_sess->target_id *
6953 sizeof(*fw_ddb_entry));
6954 ddb_size = sizeof(*fw_ddb_entry);
6955 } else {
6956 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
6957 (ha->hw.flt_region_ddb << 2);
6958 /* flt_ddb_size is the DDB table size for both ports,
6959 * so divide it by 2 to calculate the offset for the second port
6960 */
6961 if (ha->port_num == 1)
6962 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
6963
6964 dev_db_end_offset = dev_db_start_offset +
6965 (ha->hw.flt_ddb_size / 2);
6966
6967 dev_db_start_offset += (fnode_sess->target_id *
6968 sizeof(*fw_ddb_entry));
6969 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
6970
6971 ddb_size = sizeof(*ddb_cookie);
6972 }
6973
6974 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
6975 __func__, dev_db_start_offset, dev_db_end_offset));
6976
6977 if (dev_db_start_offset > dev_db_end_offset) {
6978 rc = -EIO;
6979 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
6980 __func__, fnode_sess->target_id));
6981 goto exit_ddb_del;
6982 }
6983
6984 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
6985 &fw_ddb_entry_dma, GFP_KERNEL);
6986 if (!pddb) {
6987 rc = -ENOMEM;
6988 DEBUG2(ql4_printk(KERN_ERR, ha,
6989 "%s: Unable to allocate dma buffer\n",
6990 __func__));
6991 goto exit_ddb_del;
6992 }
6993
6994 if (is_qla40XX(ha)) {
6995 fw_ddb_entry = pddb;
6996 memset(fw_ddb_entry, 0, ddb_size);
6997 ddb_cookie = &fw_ddb_entry->cookie;
6998 } else {
6999 ddb_cookie = pddb;
7000 }
7001
7002 /* invalidate the cookie */
7003 *ddb_cookie = 0xFFEE;
7004 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7005 ddb_size, FLASH_OPT_RMW_COMMIT);
7006
7007 sysfs_ddb_del:
7008 target_id = fnode_sess->target_id;
7009 iscsi_destroy_flashnode_sess(fnode_sess);
7010 ql4_printk(KERN_INFO, ha,
7011 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
7012 __func__, target_id, ha->host_no);
7013 exit_ddb_del:
7014 if (pddb)
7015 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
7016 fw_ddb_entry_dma);
7017 return rc;
7018 }
7019
7020 /**
7021 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
7022 * @ha: pointer to adapter structure
7023 *
7024 * Export the firmware DDB for all send targets and normal targets to sysfs.
7025 **/
7026 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
7027 {
7028 struct dev_db_entry *fw_ddb_entry = NULL;
7029 dma_addr_t fw_ddb_entry_dma;
7030 uint16_t max_ddbs;
7031 uint16_t idx = 0;
7032 int ret = QLA_SUCCESS;
7033
7034 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
7035 sizeof(*fw_ddb_entry),
7036 &fw_ddb_entry_dma, GFP_KERNEL);
7037 if (!fw_ddb_entry) {
7038 DEBUG2(ql4_printk(KERN_ERR, ha,
7039 "%s: Unable to allocate dma buffer\n",
7040 __func__));
7041 return -ENOMEM;
7042 }
7043
7044 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7045 MAX_DEV_DB_ENTRIES;
7046
7047 for (idx = 0; idx < max_ddbs; idx++) {
7048 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
7049 idx))
7050 continue;
7051
7052 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
7053 if (ret) {
7054 ret = -EIO;
7055 break;
7056 }
7057 }
7058
7059 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
7060 fw_ddb_entry_dma);
7061
7062 return ret;
7063 }
7064
7065 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
7066 {
7067 iscsi_destroy_all_flashnode(ha->host);
7068 }
7069
7070 /**
7071 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
7072 * @ha: pointer to adapter structure
7073 * @is_reset: Is this init path or reset path
7074 *
7075 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
7076 * using connection open, then create the list of normal targets (nt)
7077 * from firmware DDBs. Based on the list of nt, set up session and connection
7078 * objects.
7079 **/
7080 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
7081 {
7082 uint16_t tmo = 0;
7083 struct list_head list_st, list_nt;
7084 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
7085 unsigned long wtime;
7086
7087 if (!test_bit(AF_LINK_UP, &ha->flags)) {
7088 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
7089 ha->is_reset = is_reset;
7090 return;
7091 }
7092
7093 INIT_LIST_HEAD(&list_st);
7094 INIT_LIST_HEAD(&list_nt);
7095
7096 qla4xxx_build_st_list(ha, &list_st);
7097
7098 /* Before issuing the conn open mbox, ensure all IP address states are
7099 * configured. Note that conn open fails if the IPs are not configured.
7100 */
7101 qla4xxx_wait_for_ip_configuration(ha);
7102
7103 /* Go through the STs and fire the sendtargets by issuing the conn open mbox */
7104 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
7105 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
7106 }
7107
7108 /* Wait for all sendtargets to complete; wait a minimum of 12 sec */
7109 tmo = ((ha->def_timeout > LOGIN_TOV) &&
7110 (ha->def_timeout < LOGIN_TOV * 10) ?
7111 ha->def_timeout : LOGIN_TOV);
7112
7113 DEBUG2(ql4_printk(KERN_INFO, ha,
7114 "Default time to wait for build ddb %d\n", tmo));
7115
7116 wtime = jiffies + (HZ * tmo);
7117 do {
7118 if (list_empty(&list_st))
7119 break;
7120
7121 qla4xxx_remove_failed_ddb(ha, &list_st);
7122 schedule_timeout_uninterruptible(HZ / 10);
7123 } while (time_after(wtime, jiffies));
7124
7125
7126 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
7127
7128 qla4xxx_free_ddb_list(&list_st);
7129 qla4xxx_free_ddb_list(&list_nt);
7130
7131 qla4xxx_free_ddb_index(ha);
7132 }
7133
7134 /**
7135 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
7136 * response.
7137 * @ha: pointer to adapter structure
7138 *
7139 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
7140 * set in the DDB and we wait for the login response of the boot targets
7141 * during probe.
7142 **/
7143 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
7144 {
7145 struct ddb_entry *ddb_entry;
7146 struct dev_db_entry *fw_ddb_entry = NULL;
7147 dma_addr_t fw_ddb_entry_dma;
7148 unsigned long wtime;
7149 uint32_t ddb_state;
7150 int max_ddbs, idx, ret;
7151
7152 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7153 MAX_DEV_DB_ENTRIES;
7154
7155 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7156 &fw_ddb_entry_dma, GFP_KERNEL);
7157 if (!fw_ddb_entry) {
7158 ql4_printk(KERN_ERR, ha,
7159 "%s: Unable to allocate dma buffer\n", __func__);
7160 goto exit_login_resp;
7161 }
7162
7163 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
7164
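/*
 * Walk all firmware DDB indexes; for each boot-target DDB, poll its
 * firmware state once per second until it reaches SESSION_ACTIVE or
 * SESSION_FAILED, or until the overall BOOT_LOGIN_RESP_TOV timer expires.
 */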
7165 for (idx = 0; idx < max_ddbs; idx++) {
7166 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7167 if (ddb_entry == NULL)
7168 continue;
7169
7170 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7171 DEBUG2(ql4_printk(KERN_INFO, ha,
7172 "%s: DDB index [%d]\n", __func__,
7173 ddb_entry->fw_ddb_index));
7174 do {
7175 ret = qla4xxx_get_fwddb_entry(ha,
7176 ddb_entry->fw_ddb_index,
7177 fw_ddb_entry, fw_ddb_entry_dma,
7178 NULL, NULL, &ddb_state, NULL,
7179 NULL, NULL);
7180 if (ret == QLA_ERROR)
7181 goto exit_login_resp;
7182
7183 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
7184 (ddb_state == DDB_DS_SESSION_FAILED))
7185 break;
7186
7187 schedule_timeout_uninterruptible(HZ);
7188
7189 } while ((time_after(wtime, jiffies)));
7190
7191 if (!time_after(wtime, jiffies)) {
7192 DEBUG2(ql4_printk(KERN_INFO, ha,
7193 "%s: Login response wait timer expired\n",
7194 __func__));
7195 goto exit_login_resp;
7196 }
7197 }
7198 }
7199
7200 exit_login_resp:
7201 if (fw_ddb_entry)
7202 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7203 fw_ddb_entry, fw_ddb_entry_dma);
7204 }
7205
7206 /**
7207 * qla4xxx_probe_adapter - callback function to probe HBA
7208 * @pdev: pointer to pci_dev structure
7209 * @ent: pointer to pci_device_id structure
7210 *
7211 * This routine will probe for QLogic 4xxx iSCSI host adapters.
7212 * It returns zero if successful. It also initializes all data necessary for
7213 * the driver.
7214 **/
7215 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
7216 const struct pci_device_id *ent)
7217 {
7218 int ret = -ENODEV, status;
7219 struct Scsi_Host *host;
7220 struct scsi_qla_host *ha;
7221 uint8_t init_retry_count = 0;
7222 char buf[34];
7223 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
7224 uint32_t dev_state;
7225
7226 if (pci_enable_device(pdev))
7227 return -1;
7228
7229 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
7230 if (host == NULL) {
7231 printk(KERN_WARNING
7232 "qla4xxx: Couldn't allocate host from scsi layer!\n");
7233 goto probe_disable_device;
7234 }
7235
7236 /* Clear our data area */
7237 ha = to_qla_host(host);
7238 memset(ha, 0, sizeof(*ha));
7239
7240 /* Save the information from PCI BIOS. */
7241 ha->pdev = pdev;
7242 ha->host = host;
7243 ha->host_no = host->host_no;
7244 ha->func_num = PCI_FUNC(ha->pdev->devfn);
7245
7246 pci_enable_pcie_error_reporting(pdev);
7247
7248 /* Setup Runtime configurable options */
7249 if (is_qla8022(ha)) {
7250 ha->isp_ops = &qla4_82xx_isp_ops;
7251 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
7252 ha->qdr_sn_window = -1;
7253 ha->ddr_mn_window = -1;
7254 ha->curr_window = 255;
7255 nx_legacy_intr = &legacy_intr[ha->func_num];
7256 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
7257 ha->nx_legacy_intr.tgt_status_reg =
7258 nx_legacy_intr->tgt_status_reg;
7259 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
7260 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
7261 } else if (is_qla8032(ha) || is_qla8042(ha)) {
7262 ha->isp_ops = &qla4_83xx_isp_ops;
7263 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
7264 } else {
7265 ha->isp_ops = &qla4xxx_isp_ops;
7266 }
7267
7268 if (is_qla80XX(ha)) {
7269 rwlock_init(&ha->hw_lock);
7270 ha->pf_bit = ha->func_num << 16;
7271 /* Set EEH reset type to fundamental if required by hba */
7272 pdev->needs_freset = 1;
7273 }
7274
7275 /* Configure PCI I/O space. */
7276 ret = ha->isp_ops->iospace_config(ha);
7277 if (ret)
7278 goto probe_failed_ioconfig;
7279
7280 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
7281 pdev->device, pdev->irq, ha->reg);
7282
7283 qla4xxx_config_dma_addressing(ha);
7284
7285 /* Initialize lists and spinlocks. */
7286 INIT_LIST_HEAD(&ha->free_srb_q);
7287
7288 mutex_init(&ha->mbox_sem);
7289 mutex_init(&ha->chap_sem);
7290 init_completion(&ha->mbx_intr_comp);
7291 init_completion(&ha->disable_acb_comp);
7292
7293 spin_lock_init(&ha->hardware_lock);
7294 spin_lock_init(&ha->work_lock);
7295
7296 /* Initialize work list */
7297 INIT_LIST_HEAD(&ha->work_list);
7298
7299 /* Allocate dma buffers */
7300 if (qla4xxx_mem_alloc(ha)) {
7301 ql4_printk(KERN_WARNING, ha,
7302 "[ERROR] Failed to allocate memory for adapter\n");
7303
7304 ret = -ENOMEM;
7305 goto probe_failed;
7306 }
7307
7308 host->cmd_per_lun = 3;
7309 host->max_channel = 0;
7310 host->max_lun = MAX_LUNS - 1;
7311 host->max_id = MAX_TARGETS;
7312 host->max_cmd_len = IOCB_MAX_CDB_LEN;
7313 host->can_queue = MAX_SRBS;
7314 host->transportt = qla4xxx_scsi_transport;
7315
7316 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
7317 if (ret) {
7318 ql4_printk(KERN_WARNING, ha,
7319 "%s: scsi_init_shared_tag_map failed\n", __func__);
7320 goto probe_failed;
7321 }
7322
7323 pci_set_drvdata(pdev, ha);
7324
7325 ret = scsi_add_host(host, &pdev->dev);
7326 if (ret)
7327 goto probe_failed;
7328
7329 if (is_qla80XX(ha))
7330 qla4_8xxx_get_flash_info(ha);
7331
7332 if (is_qla8032(ha) || is_qla8042(ha)) {
7333 qla4_83xx_read_reset_template(ha);
7334 /*
7335 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
7336 * If DONTRESET_BIT0 is set, drivers should not set dev_state
7337 * to NEED_RESET. But if NEED_RESET is set, drivers should
7338 * honor the reset.
7339 */
7340 if (ql4xdontresethba == 1)
7341 qla4_83xx_set_idc_dontreset(ha);
7342 }
7343
7344 /*
7345 * Initialize the Host adapter request/response queues and
7346 * firmware
7347 * NOTE: interrupts enabled upon successful completion
7348 */
7349 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7350
7351 /* Don't retry adapter initialization if IRQ allocation failed */
7352 if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7353 ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7354 __func__);
7355 goto skip_retry_init;
7356 }
7357
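/*
 * Retry adapter initialization up to MAX_INIT_RETRIES times, resetting
 * the chip between attempts; for ISP8xxx parts, give up early if the
 * hardware reports a FAILED device state.
 */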
7358 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
7359 init_retry_count++ < MAX_INIT_RETRIES) {
7360
7361 if (is_qla80XX(ha)) {
7362 ha->isp_ops->idc_lock(ha);
7363 dev_state = qla4_8xxx_rd_direct(ha,
7364 QLA8XXX_CRB_DEV_STATE);
7365 ha->isp_ops->idc_unlock(ha);
7366 if (dev_state == QLA8XXX_DEV_FAILED) {
7367 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
7368 "initialize adapter. H/W is in failed state\n",
7369 __func__);
7370 break;
7371 }
7372 }
7373 DEBUG2(printk("scsi: %s: retrying adapter initialization "
7374 "(%d)\n", __func__, init_retry_count));
7375
7376 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
7377 continue;
7378
7379 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7380 }
7381
7382 skip_retry_init:
7383 if (!test_bit(AF_ONLINE, &ha->flags)) {
7384 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7385
7386 if ((is_qla8022(ha) && ql4xdontresethba) ||
7387 ((is_qla8032(ha) || is_qla8042(ha)) &&
7388 qla4_83xx_idc_dontreset(ha))) {
7389 /* Put the device in failed state. */
7390 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7391 ha->isp_ops->idc_lock(ha);
7392 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7393 QLA8XXX_DEV_FAILED);
7394 ha->isp_ops->idc_unlock(ha);
7395 }
7396 ret = -ENODEV;
7397 goto remove_host;
7398 }
7399
7400 /* Startup the kernel thread for this host adapter. */
7401 DEBUG2(printk("scsi: %s: Starting kernel thread for "
7402 "qla4xxx_dpc\n", __func__));
7403 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
7404 ha->dpc_thread = create_singlethread_workqueue(buf);
7405 if (!ha->dpc_thread) {
7406 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
7407 ret = -ENODEV;
7408 goto remove_host;
7409 }
7410 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
7411
7412 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
7413 ha->host_no);
7414 if (!ha->task_wq) {
7415 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
7416 ret = -ENODEV;
7417 goto remove_host;
7418 }
7419
7420 /*
7421 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
7422 * (which is called indirectly by qla4xxx_initialize_adapter),
7423 * so that irqs will be registered after crbinit but before
7424 * mbx_intr_enable.
7425 */
7426 if (is_qla40XX(ha)) {
7427 ret = qla4xxx_request_irqs(ha);
7428 if (ret) {
7429 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
7430 "interrupt %d already in use.\n", pdev->irq);
7431 goto remove_host;
7432 }
7433 }
7434
7435 pci_save_state(ha->pdev);
7436 ha->isp_ops->enable_intrs(ha);
7437
7438 /* Start timer thread. */
7439 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
7440
7441 set_bit(AF_INIT_DONE, &ha->flags);
7442
7443 qla4_8xxx_alloc_sysfs_attr(ha);
7444
7445 printk(KERN_INFO
7446 " QLogic iSCSI HBA Driver version: %s\n"
7447 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7448 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7449 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
7450 ha->fw_info.fw_patch, ha->fw_info.fw_build);
7451
7452 /* Set the driver version */
7453 if (is_qla80XX(ha))
7454 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
7455
7456 if (qla4xxx_setup_boot_info(ha))
7457 ql4_printk(KERN_ERR, ha,
7458 "%s: No iSCSI boot target configured\n", __func__);
7459
7460 if (qla4xxx_sysfs_ddb_export(ha))
7461 ql4_printk(KERN_ERR, ha,
7462 "%s: Error exporting ddb to sysfs\n", __func__);
7463
7464 /* Perform the build ddb list and login to each */
7465 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
7466 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
7467 qla4xxx_wait_login_resp_boot_tgt(ha);
7468
7469 qla4xxx_create_chap_list(ha);
7470
7471 qla4xxx_create_ifaces(ha);
7472 return 0;
7473
7474 remove_host:
7475 scsi_remove_host(ha->host);
7476
7477 probe_failed:
7478 qla4xxx_free_adapter(ha);
7479
7480 probe_failed_ioconfig:
7481 pci_disable_pcie_error_reporting(pdev);
7482 scsi_host_put(ha->host);
7483
7484 probe_disable_device:
7485 pci_disable_device(pdev);
7486
7487 return ret;
7488 }
7489
7490 /**
7491 * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
7492 * @ha: pointer to adapter structure
7493 *
7494 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
7495 * so that the other port does not re-initialize while this ha is being
7496 * removed due to driver unload or HBA hotplug.
7497 **/
7498 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
7499 {
7500 struct scsi_qla_host *other_ha = NULL;
7501 struct pci_dev *other_pdev = NULL;
7502 int fn = ISP4XXX_PCI_FN_2;
7503
7504 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
7505 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
7506 fn = ISP4XXX_PCI_FN_1;
7507
7508 other_pdev =
7509 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7510 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7511 fn));
7512
7513 /* Get other_ha if other_pdev is valid and its state is enabled */
7514 if (other_pdev) {
7515 if (atomic_read(&other_pdev->enable_cnt)) {
7516 other_ha = pci_get_drvdata(other_pdev);
7517 if (other_ha) {
7518 set_bit(AF_HA_REMOVAL, &other_ha->flags);
7519 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
7520 "Prevent %s reinit\n", __func__,
7521 dev_name(&other_ha->pdev->dev)));
7522 }
7523 }
7524 pci_dev_put(other_pdev);
7525 }
7526 }
7527
7528 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
7529 {
7530 struct ddb_entry *ddb_entry;
7531 int options;
7532 int idx;
7533
7534 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7535
7536 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7537 if ((ddb_entry != NULL) &&
7538 (ddb_entry->ddb_type == FLASH_DDB)) {
7539
7540 options = LOGOUT_OPTION_CLOSE_SESSION;
7541 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
7542 == QLA_ERROR)
7543 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
7544 __func__);
7545
7546 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7547 /*
7548 * We dropped the module reference when the session was set up
7549 * so that driver unload could proceed seamlessly without
7550 * actually destroying the session; take the reference back
7551 * before tearing the session down.
7552 */
7553 try_module_get(qla4xxx_iscsi_transport.owner);
7554 iscsi_destroy_endpoint(ddb_entry->conn->ep);
7555 qla4xxx_free_ddb(ha, ddb_entry);
7556 iscsi_session_teardown(ddb_entry->sess);
7557 }
7558 }
7559 }
7560 /**
7561 * qla4xxx_remove_adapter - callback function to remove adapter.
7562 * @pci_dev: PCI device pointer
7563 **/
7564 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
7565 {
7566 struct scsi_qla_host *ha;
7567
7568 /*
7569 * If the PCI device is disabled, then probe_adapter failed and the
7570 * resources were already cleaned up on probe_adapter exit.
7571 */
7572 if (!pci_is_enabled(pdev))
7573 return;
7574
7575 ha = pci_get_drvdata(pdev);
7576
7577 if (is_qla40XX(ha))
7578 qla4xxx_prevent_other_port_reinit(ha);
7579
7580 /* destroy iface from sysfs */
7581 qla4xxx_destroy_ifaces(ha);
7582
7583 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
7584 iscsi_boot_destroy_kset(ha->boot_kset);
7585
7586 qla4xxx_destroy_fw_ddb_session(ha);
7587 qla4_8xxx_free_sysfs_attr(ha);
7588
7589 qla4xxx_sysfs_ddb_remove(ha);
7590 scsi_remove_host(ha->host);
7591
7592 qla4xxx_free_adapter(ha);
7593
7594 scsi_host_put(ha->host);
7595
7596 pci_disable_pcie_error_reporting(pdev);
7597 pci_disable_device(pdev);
7598 pci_set_drvdata(pdev, NULL);
7599 }
7600
7601 /**
7602 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
7603 * @ha: HA context
7604 *
7605 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
7606 * supported addressing method.
7607 */
7608 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
7609 {
7610 int retval;
7611
7612 /* Update our PCI device dma_mask for full 64 bit mask */
7613 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
7614 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7615 dev_dbg(&ha->pdev->dev,
7616 "Failed to set 64 bit PCI consistent mask; "
7617 "using 32 bit.\n");
7618 retval = pci_set_consistent_dma_mask(ha->pdev,
7619 DMA_BIT_MASK(32));
7620 }
7621 } else
7622 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
7623 }
7624
7625 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
7626 {
7627 struct iscsi_cls_session *cls_sess;
7628 struct iscsi_session *sess;
7629 struct ddb_entry *ddb;
7630 int queue_depth = QL4_DEF_QDEPTH;
7631
7632 cls_sess = starget_to_session(sdev->sdev_target);
7633 sess = cls_sess->dd_data;
7634 ddb = sess->dd_data;
7635
7636 sdev->hostdata = ddb;
7637 sdev->tagged_supported = 1;
7638
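/*
 * Honor ql4xmaxqdepth only when it is sane (non-zero and fits in 16
 * bits); otherwise fall back to the QL4_DEF_QDEPTH default.  For
 * example, ql4xmaxqdepth=64 yields a queue depth of 64, while a value
 * of 0x10000 is ignored.
 */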
7639 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
7640 queue_depth = ql4xmaxqdepth;
7641
7642 scsi_activate_tcq(sdev, queue_depth);
7643 return 0;
7644 }
7645
7646 static int qla4xxx_slave_configure(struct scsi_device *sdev)
7647 {
7648 sdev->tagged_supported = 1;
7649 return 0;
7650 }
7651
7652 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
7653 {
7654 scsi_deactivate_tcq(sdev, 1);
7655 }
7656
7657 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
7658 int reason)
7659 {
7660 if (!ql4xqfulltracking)
7661 return -EOPNOTSUPP;
7662
7663 return iscsi_change_queue_depth(sdev, qdepth, reason);
7664 }
7665
7666 /**
7667 * qla4xxx_del_from_active_array - returns an active srb
7668 * @ha: Pointer to host adapter structure.
7669 * @index: index into the active_array
7670 *
7671 * This routine removes and returns the srb at the specified index
7672 **/
7673 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
7674 uint32_t index)
7675 {
7676 struct srb *srb = NULL;
7677 struct scsi_cmnd *cmd = NULL;
7678
7679 cmd = scsi_host_find_tag(ha->host, index);
7680 if (!cmd)
7681 return srb;
7682
7683 srb = (struct srb *)CMD_SP(cmd);
7684 if (!srb)
7685 return srb;
7686
7687 /* update counters */
7688 if (srb->flags & SRB_DMA_VALID) {
7689 ha->iocb_cnt -= srb->iocb_cnt;
7690 if (srb->cmd)
7691 srb->cmd->host_scribble =
7692 (unsigned char *)(unsigned long) MAX_SRBS;
7693 }
7694 return srb;
7695 }
7696
7697 /**
7698 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
7699 * @ha: Pointer to host adapter structure.
7700 * @cmd: Scsi Command to wait on.
7701 *
7702 * This routine waits for the command to be returned by the firmware,
7703 * up to a maximum wait time.
7704 **/
7705 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
7706 struct scsi_cmnd *cmd)
7707 {
7708 int done = 0;
7709 struct srb *rp;
7710 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
7711 int ret = SUCCESS;
7712
7713 /* Don't wait on the command if a PCI error is being handled
7714 * by the PCI AER driver
7715 */
7716 if (unlikely(pci_channel_offline(ha->pdev)) ||
7717 (test_bit(AF_EEH_BUSY, &ha->flags))) {
7718 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
7719 ha->host_no, __func__);
7720 return ret;
7721 }
7722
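/*
 * Poll CMD_SP(cmd), which is cleared once the firmware returns the
 * command; each pass sleeps 2 seconds, so the wait is bounded by
 * roughly 2 * EH_WAIT_CMD_TOV seconds.
 */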
7723 do {
7724 /* Check to see if it has been returned to the OS */
7725 rp = (struct srb *) CMD_SP(cmd);
7726 if (rp == NULL) {
7727 done++;
7728 break;
7729 }
7730
7731 msleep(2000);
7732 } while (max_wait_time--);
7733
7734 return done;
7735 }
7736
7737 /**
7738 * qla4xxx_wait_for_hba_online - waits for HBA to come online
7739 * @ha: Pointer to host adapter structure
7740 **/
7741 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
7742 {
7743 unsigned long wait_online;
7744
7745 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
7746 while (time_before(jiffies, wait_online)) {
7747
7748 if (adapter_up(ha))
7749 return QLA_SUCCESS;
7750
7751 msleep(2000);
7752 }
7753
7754 return QLA_ERROR;
7755 }
7756
7757 /**
7758 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
7759 * @ha: pointer to HBA
7760 * @stgt: pointer to the SCSI target
7761 * @sdev: pointer to the SCSI device (NULL to wait on the whole target)
7762 *
7763 * This function waits for all outstanding commands to a target or lun to
7764 * complete. It returns 0 if all pending commands are returned and 1 otherwise.
7765 **/
7766 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
7767 struct scsi_target *stgt,
7768 struct scsi_device *sdev)
7769 {
7770 int cnt;
7771 int status = 0;
7772 struct scsi_cmnd *cmd;
7773
7774 /*
7775 * Waiting for all commands for the designated target or dev
7776 * in the active array
7777 */
7778 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
7779 cmd = scsi_host_find_tag(ha->host, cnt);
7780 if (cmd && stgt == scsi_target(cmd->device) &&
7781 (!sdev || sdev == cmd->device)) {
7782 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7783 status++;
7784 break;
7785 }
7786 }
7787 }
7788 return status;
7789 }
7790
7791 /**
7792 * qla4xxx_eh_abort - callback for abort task.
7793 * @cmd: Pointer to Linux's SCSI command structure
7794 *
7795 * This routine is called by the Linux OS to abort the specified
7796 * command.
7797 **/
7798 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
7799 {
7800 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7801 unsigned int id = cmd->device->id;
7802 unsigned int lun = cmd->device->lun;
7803 unsigned long flags;
7804 struct srb *srb = NULL;
7805 int ret = SUCCESS;
7806 int wait = 0;
7807
7808 ql4_printk(KERN_INFO, ha,
7809 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
7810 ha->host_no, id, lun, cmd);
7811
7812 spin_lock_irqsave(&ha->hardware_lock, flags);
7813 srb = (struct srb *) CMD_SP(cmd);
7814 if (!srb) {
7815 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7816 return SUCCESS;
7817 }
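/* Hold a reference on the srb so it cannot be freed while the abort
 * mailbox command is in flight. */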
7818 kref_get(&srb->srb_ref);
7819 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7820
7821 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
7822 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
7823 ha->host_no, id, lun));
7824 ret = FAILED;
7825 } else {
7826 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
7827 ha->host_no, id, lun));
7828 wait = 1;
7829 }
7830
7831 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
7832
7833 /* Wait for command to complete */
7834 if (wait) {
7835 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7836 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
7837 ha->host_no, id, lun));
7838 ret = FAILED;
7839 }
7840 }
7841
7842 ql4_printk(KERN_INFO, ha,
7843 "scsi%ld:%d:%d: Abort command - %s\n",
7844 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
7845
7846 return ret;
7847 }
7848
7849 /**
7850 * qla4xxx_eh_device_reset - callback for device (LUN) reset
7851 * @cmd: Pointer to Linux's SCSI command structure
7852 *
7853 * This routine is called by the Linux OS to reset the specified lun
7854 * on the target.
7855 **/
7856 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
7857 {
7858 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7859 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7860 int ret = FAILED, stat;
7861
7862 if (!ddb_entry)
7863 return ret;
7864
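/* If the session is in recovery, iscsi_block_scsi_eh() waits for it to
 * come back; a non-zero return means it could not be recovered and the
 * reset is abandoned with that status. */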
7865 ret = iscsi_block_scsi_eh(cmd);
7866 if (ret)
7867 return ret;
7868 ret = FAILED;
7869
7870 ql4_printk(KERN_INFO, ha,
7871 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
7872 cmd->device->channel, cmd->device->id, cmd->device->lun);
7873
7874 DEBUG2(printk(KERN_INFO
7875 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
7876 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
7877 cmd, jiffies, cmd->request->timeout / HZ,
7878 ha->dpc_flags, cmd->result, cmd->allowed));
7879
7880 /* FIXME: wait for hba to go online */
7881 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
7882 if (stat != QLA_SUCCESS) {
7883 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
7884 goto eh_dev_reset_done;
7885 }
7886
7887 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7888 cmd->device)) {
7889 ql4_printk(KERN_INFO, ha,
7890 "DEVICE RESET FAILED - waiting for "
7891 "commands.\n");
7892 goto eh_dev_reset_done;
7893 }
7894
7895 /* Send marker. */
7896 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7897 MM_LUN_RESET) != QLA_SUCCESS)
7898 goto eh_dev_reset_done;
7899
7900 ql4_printk(KERN_INFO, ha,
7901 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
7902 ha->host_no, cmd->device->channel, cmd->device->id,
7903 cmd->device->lun);
7904
7905 ret = SUCCESS;
7906
7907 eh_dev_reset_done:
7908
7909 return ret;
7910 }
7911
7912 /**
7913 * qla4xxx_eh_target_reset - callback for target reset.
7914 * @cmd: Pointer to Linux's SCSI command structure
7915 *
7916 * This routine is called by the Linux OS to reset the target.
7917 **/
7918 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
7919 {
7920 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7921 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7922 int stat, ret;
7923
7924 if (!ddb_entry)
7925 return FAILED;
7926
7927 ret = iscsi_block_scsi_eh(cmd);
7928 if (ret)
7929 return ret;
7930
7931 starget_printk(KERN_INFO, scsi_target(cmd->device),
7932 "WARM TARGET RESET ISSUED.\n");
7933
7934 DEBUG2(printk(KERN_INFO
7935 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
7936 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
7937 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
7938 ha->dpc_flags, cmd->result, cmd->allowed));
7939
7940 stat = qla4xxx_reset_target(ha, ddb_entry);
7941 if (stat != QLA_SUCCESS) {
7942 starget_printk(KERN_INFO, scsi_target(cmd->device),
7943 "WARM TARGET RESET FAILED.\n");
7944 return FAILED;
7945 }
7946
7947 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7948 NULL)) {
7949 starget_printk(KERN_INFO, scsi_target(cmd->device),
7950 "WARM TARGET DEVICE RESET FAILED - "
7951 "waiting for commands.\n");
7952 return FAILED;
7953 }
7954
7955 /* Send marker. */
7956 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7957 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
7958 starget_printk(KERN_INFO, scsi_target(cmd->device),
7959 "WARM TARGET DEVICE RESET FAILED - "
7960 "marker iocb failed.\n");
7961 return FAILED;
7962 }
7963
7964 starget_printk(KERN_INFO, scsi_target(cmd->device),
7965 "WARM TARGET RESET SUCCEEDED.\n");
7966 return SUCCESS;
7967 }
7968
7969 /**
7970 * qla4xxx_is_eh_active - check if error handler is running
7971 * @shost: Pointer to SCSI Host struct
7972 *
7973 * This routine determines whether the host reset was invoked from the
7974 * EH path or from an application such as sg_reset.
7975 **/
7976 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
7977 {
7978 if (shost->shost_state == SHOST_RECOVERY)
7979 return 1;
7980 return 0;
7981 }
7982
7983 /**
7984 * qla4xxx_eh_host_reset - kernel callback
7985 * @cmd: Pointer to Linux's SCSI command structure
7986 *
7987 * This routine is invoked by the Linux kernel to perform fatal error
7988 * recovery on the specified adapter.
7989 **/
7990 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
7991 {
7992 int return_status = FAILED;
7993 struct scsi_qla_host *ha;
7994
7995 ha = to_qla_host(cmd->device->host);
7996
7997 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
7998 qla4_83xx_set_idc_dontreset(ha);
7999
8000 /*
8001 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
8002 * protocol drivers, we should not set device_state to NEED_RESET
8003 */
8004 if (ql4xdontresethba ||
8005 ((is_qla8032(ha) || is_qla8042(ha)) &&
8006 qla4_83xx_idc_dontreset(ha))) {
8007 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
8008 ha->host_no, __func__));
8009
8010 /* Clear outstanding srb in queues */
8011 if (qla4xxx_is_eh_active(cmd->device->host))
8012 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
8013
8014 return FAILED;
8015 }
8016
8017 ql4_printk(KERN_INFO, ha,
8018 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
8019 cmd->device->channel, cmd->device->id, cmd->device->lun);
8020
8021 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
8022 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
8023 "DEAD.\n", ha->host_no, cmd->device->channel,
8024 __func__));
8025
8026 return FAILED;
8027 }
8028
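/*
 * Flag the kind of reset required (firmware context reset on ISP8xxx,
 * full HBA reset otherwise) and then run the recovery.
 */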
8029 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8030 if (is_qla80XX(ha))
8031 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
8032 else
8033 set_bit(DPC_RESET_HA, &ha->dpc_flags);
8034 }
8035
8036 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
8037 return_status = SUCCESS;
8038
8039 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
8040 return_status == FAILED ? "FAILED" : "SUCCEEDED");
8041
8042 return return_status;
8043 }
8044
8045 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
8046 {
8047 uint32_t mbox_cmd[MBOX_REG_COUNT];
8048 uint32_t mbox_sts[MBOX_REG_COUNT];
8049 struct addr_ctrl_blk_def *acb = NULL;
8050 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
8051 int rval = QLA_SUCCESS;
8052 dma_addr_t acb_dma;
8053
8054 acb = dma_alloc_coherent(&ha->pdev->dev,
8055 sizeof(struct addr_ctrl_blk_def),
8056 &acb_dma, GFP_KERNEL);
8057 if (!acb) {
8058 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
8059 __func__);
8060 rval = -ENOMEM;
8061 goto exit_port_reset;
8062 }
8063
8064 memset(acb, 0, acb_len);
8065
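/*
 * Read back the primary ACB, disable it, wait (bounded by
 * DISABLE_ACB_TOV) for the disable to complete, then program the saved
 * ACB again - effectively restarting the firmware context without a
 * full chip reset.
 */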
8066 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
8067 if (rval != QLA_SUCCESS) {
8068 rval = -EIO;
8069 goto exit_free_acb;
8070 }
8071
8072 rval = qla4xxx_disable_acb(ha);
8073 if (rval != QLA_SUCCESS) {
8074 rval = -EIO;
8075 goto exit_free_acb;
8076 }
8077
8078 wait_for_completion_timeout(&ha->disable_acb_comp,
8079 DISABLE_ACB_TOV * HZ);
8080
8081 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
8082 if (rval != QLA_SUCCESS) {
8083 rval = -EIO;
8084 goto exit_free_acb;
8085 }
8086
8087 exit_free_acb:
8088 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
8089 acb, acb_dma);
8090 exit_port_reset:
8091 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
8092 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
8093 return rval;
8094 }
8095
8096 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
8097 {
8098 struct scsi_qla_host *ha = to_qla_host(shost);
8099 int rval = QLA_SUCCESS;
8100 uint32_t idc_ctrl;
8101
8102 if (ql4xdontresethba) {
8103 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
8104 __func__));
8105 rval = -EPERM;
8106 goto exit_host_reset;
8107 }
8108
8109 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
8110 goto recover_adapter;
8111
8112 switch (reset_type) {
8113 case SCSI_ADAPTER_RESET:
8114 set_bit(DPC_RESET_HA, &ha->dpc_flags);
8115 break;
8116 case SCSI_FIRMWARE_RESET:
8117 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8118 if (is_qla80XX(ha))
8119 /* set firmware context reset */
8120 set_bit(DPC_RESET_HA_FW_CONTEXT,
8121 &ha->dpc_flags);
8122 else {
8123 rval = qla4xxx_context_reset(ha);
8124 goto exit_host_reset;
8125 }
8126 }
8127 break;
8128 }
8129
8130 recover_adapter:
8131 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
8132 * reset is issued by application */
8133 if ((is_qla8032(ha) || is_qla8042(ha)) &&
8134 test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8135 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
8136 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
8137 (idc_ctrl | GRACEFUL_RESET_BIT1));
8138 }
8139
8140 rval = qla4xxx_recover_adapter(ha);
8141 if (rval != QLA_SUCCESS) {
8142 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
8143 __func__));
8144 rval = -EIO;
8145 }
8146
8147 exit_host_reset:
8148 return rval;
8149 }
8150
8151 /* PCI AER driver recovers from all correctable errors w/o
8152 * driver intervention. For uncorrectable errors PCI AER
8153 * driver calls the following device driver's callbacks
8154 *
8155 * - Fatal Errors - link_reset
8156 * - Non-Fatal Errors - driver's pci_error_detected() which
8157 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
8158 *
8159 * PCI AER driver calls
8160 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
8161 * returns RECOVERED or NEED_RESET if fw_hung
8162 * NEED_RESET - driver's slot_reset()
8163 * DISCONNECT - device is dead & cannot recover
8164 * RECOVERED - driver's pci_resume()
8165 */
8166 static pci_ers_result_t
8167 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8168 {
8169 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8170
8171 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
8172 ha->host_no, __func__, state);
8173
8174 if (!is_aer_supported(ha))
8175 return PCI_ERS_RESULT_NONE;
8176
8177 switch (state) {
8178 case pci_channel_io_normal:
8179 clear_bit(AF_EEH_BUSY, &ha->flags);
8180 return PCI_ERS_RESULT_CAN_RECOVER;
8181 case pci_channel_io_frozen:
8182 set_bit(AF_EEH_BUSY, &ha->flags);
8183 qla4xxx_mailbox_premature_completion(ha);
8184 qla4xxx_free_irqs(ha);
8185 pci_disable_device(pdev);
8186 /* Return back all IOs */
8187 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
8188 return PCI_ERS_RESULT_NEED_RESET;
8189 case pci_channel_io_perm_failure:
8190 set_bit(AF_EEH_BUSY, &ha->flags);
8191 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
8192 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
8193 return PCI_ERS_RESULT_DISCONNECT;
8194 }
8195 return PCI_ERS_RESULT_NEED_RESET;
8196 }
8197
8198 /**
8199 * qla4xxx_pci_mmio_enabled() gets called if
8200 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
8201 * and read/write to the device still works.
8202 **/
8203 static pci_ers_result_t
8204 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
8205 {
8206 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8207
8208 if (!is_aer_supported(ha))
8209 return PCI_ERS_RESULT_NONE;
8210
8211 return PCI_ERS_RESULT_RECOVERED;
8212 }
8213
8214 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
8215 {
8216 uint32_t rval = QLA_ERROR;
8217 int fn;
8218 struct pci_dev *other_pdev = NULL;
8219
8220 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
8221
8222 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8223
8224 if (test_bit(AF_ONLINE, &ha->flags)) {
8225 clear_bit(AF_ONLINE, &ha->flags);
8226 clear_bit(AF_LINK_UP, &ha->flags);
8227 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
8228 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
8229 }
8230
8231 fn = PCI_FUNC(ha->pdev->devfn);
8232 while (fn > 0) {
8233 fn--;
8234 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
8235 "func %x\n", ha->host_no, __func__, fn);
8236 /* Get the pci device given the domain, bus,
8237 * slot/function number */
8238 other_pdev =
8239 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8240 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8241 fn));
8242
8243 if (!other_pdev)
8244 continue;
8245
8246 if (atomic_read(&other_pdev->enable_cnt)) {
8247 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
8248 "func in enabled state%x\n", ha->host_no,
8249 __func__, fn);
8250 pci_dev_put(other_pdev);
8251 break;
8252 }
8253 pci_dev_put(other_pdev);
8254 }
8255
8256 /* The first function on the card (the reset owner) will
8257 * start and initialize the firmware. The other functions
8258 * on the card will only reset their firmware context
8259 */
8260 if (!fn) {
8261 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
8262 "0x%x is the owner\n", ha->host_no, __func__,
8263 ha->pdev->devfn);
8264
8265 ha->isp_ops->idc_lock(ha);
8266 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8267 QLA8XXX_DEV_COLD);
8268 ha->isp_ops->idc_unlock(ha);
8269
8270 rval = qla4_8xxx_update_idc_reg(ha);
8271 if (rval == QLA_ERROR) {
8272 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
8273 ha->host_no, __func__);
8274 ha->isp_ops->idc_lock(ha);
8275 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8276 QLA8XXX_DEV_FAILED);
8277 ha->isp_ops->idc_unlock(ha);
8278 goto exit_error_recovery;
8279 }
8280
8281 clear_bit(AF_FW_RECOVERY, &ha->flags);
8282 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8283
8284 if (rval != QLA_SUCCESS) {
8285 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8286 "FAILED\n", ha->host_no, __func__);
8287 ha->isp_ops->idc_lock(ha);
8288 qla4_8xxx_clear_drv_active(ha);
8289 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8290 QLA8XXX_DEV_FAILED);
8291 ha->isp_ops->idc_unlock(ha);
8292 } else {
8293 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8294 "READY\n", ha->host_no, __func__);
8295 ha->isp_ops->idc_lock(ha);
8296 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8297 QLA8XXX_DEV_READY);
8298 /* Clear driver state register */
8299 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
8300 qla4_8xxx_set_drv_active(ha);
8301 ha->isp_ops->idc_unlock(ha);
8302 ha->isp_ops->enable_intrs(ha);
8303 }
8304 } else {
8305 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
8306 "the reset owner\n", ha->host_no, __func__,
8307 ha->pdev->devfn);
8308 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
8309 QLA8XXX_DEV_READY)) {
8310 clear_bit(AF_FW_RECOVERY, &ha->flags);
8311 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8312 if (rval == QLA_SUCCESS)
8313 ha->isp_ops->enable_intrs(ha);
8314
8315 ha->isp_ops->idc_lock(ha);
8316 qla4_8xxx_set_drv_active(ha);
8317 ha->isp_ops->idc_unlock(ha);
8318 }
8319 }
8320 exit_error_recovery:
8321 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8322 return rval;
8323 }
8324
8325 static pci_ers_result_t
8326 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
8327 {
8328 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
8329 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8330 int rc;
8331
8332 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
8333 ha->host_no, __func__);
8334
8335 if (!is_aer_supported(ha))
8336 return PCI_ERS_RESULT_NONE;
8337
8338 /* Restore the saved state of PCIe device -
8339 * BAR registers, PCI Config space, PCIX, MSI,
8340 * IOV states
8341 */
8342 pci_restore_state(pdev);
8343
8344 /* pci_restore_state() clears the saved_state flag of the device;
8345 * save the state again so the saved_state flag is set for later use
8346 */
8347 pci_save_state(pdev);
8348
8349 /* Initialize device or resume if in suspended state */
8350 rc = pci_enable_device(pdev);
8351 if (rc) {
8352 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
8353 "device after reset\n", ha->host_no, __func__);
8354 goto exit_slot_reset;
8355 }
8356
8357 ha->isp_ops->disable_intrs(ha);
8358
8359 if (is_qla80XX(ha)) {
8360 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
8361 ret = PCI_ERS_RESULT_RECOVERED;
8362 goto exit_slot_reset;
8363 } else
8364 goto exit_slot_reset;
8365 }
8366
8367 exit_slot_reset:
8368 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
8369 "device after reset\n", ha->host_no, __func__, ret);
8370 return ret;
8371 }
8372
8373 static void
8374 qla4xxx_pci_resume(struct pci_dev *pdev)
8375 {
8376 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8377 int ret;
8378
8379 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
8380 ha->host_no, __func__);
8381
8382 ret = qla4xxx_wait_for_hba_online(ha);
8383 if (ret != QLA_SUCCESS) {
8384 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
8385 "resume I/O from slot/link_reset\n", ha->host_no,
8386 __func__);
8387 }
8388
8389 pci_cleanup_aer_uncorrect_error_status(pdev);
8390 clear_bit(AF_EEH_BUSY, &ha->flags);
8391 }
8392
8393 static const struct pci_error_handlers qla4xxx_err_handler = {
8394 .error_detected = qla4xxx_pci_error_detected,
8395 .mmio_enabled = qla4xxx_pci_mmio_enabled,
8396 .slot_reset = qla4xxx_pci_slot_reset,
8397 .resume = qla4xxx_pci_resume,
8398 };
8399
8400 static struct pci_device_id qla4xxx_pci_tbl[] = {
8401 {
8402 .vendor = PCI_VENDOR_ID_QLOGIC,
8403 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
8404 .subvendor = PCI_ANY_ID,
8405 .subdevice = PCI_ANY_ID,
8406 },
8407 {
8408 .vendor = PCI_VENDOR_ID_QLOGIC,
8409 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
8410 .subvendor = PCI_ANY_ID,
8411 .subdevice = PCI_ANY_ID,
8412 },
8413 {
8414 .vendor = PCI_VENDOR_ID_QLOGIC,
8415 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
8416 .subvendor = PCI_ANY_ID,
8417 .subdevice = PCI_ANY_ID,
8418 },
8419 {
8420 .vendor = PCI_VENDOR_ID_QLOGIC,
8421 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
8422 .subvendor = PCI_ANY_ID,
8423 .subdevice = PCI_ANY_ID,
8424 },
8425 {
8426 .vendor = PCI_VENDOR_ID_QLOGIC,
8427 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
8428 .subvendor = PCI_ANY_ID,
8429 .subdevice = PCI_ANY_ID,
8430 },
8431 {
8432 .vendor = PCI_VENDOR_ID_QLOGIC,
8433 .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
8434 .subvendor = PCI_ANY_ID,
8435 .subdevice = PCI_ANY_ID,
8436 },
8437 {0, 0},
8438 };
8439 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
8440
8441 static struct pci_driver qla4xxx_pci_driver = {
8442 .name = DRIVER_NAME,
8443 .id_table = qla4xxx_pci_tbl,
8444 .probe = qla4xxx_probe_adapter,
8445 .remove = qla4xxx_remove_adapter,
8446 .err_handler = &qla4xxx_err_handler,
8447 };
8448
8449 static int __init qla4xxx_module_init(void)
8450 {
8451 int ret;
8452
8453 /* Allocate cache for SRBs. */
8454 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
8455 SLAB_HWCACHE_ALIGN, NULL);
8456 if (srb_cachep == NULL) {
8457 printk(KERN_ERR
8458 "%s: Unable to allocate SRB cache..."
8459 "Failing load!\n", DRIVER_NAME);
8460 ret = -ENOMEM;
8461 goto no_srp_cache;
8462 }
8463
8464 /* Derive version string. */
8465 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
8466 if (ql4xextended_error_logging)
8467 strcat(qla4xxx_version_str, "-debug");
8468
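/*
 * Register the iSCSI transport before the PCI driver so that
 * qla4xxx_scsi_transport is valid by the time probe runs and assigns it
 * to host->transportt.
 */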
8469 qla4xxx_scsi_transport =
8470 iscsi_register_transport(&qla4xxx_iscsi_transport);
8471 if (!qla4xxx_scsi_transport){
8472 ret = -ENODEV;
8473 goto release_srb_cache;
8474 }
8475
8476 ret = pci_register_driver(&qla4xxx_pci_driver);
8477 if (ret)
8478 goto unregister_transport;
8479
8480 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
8481 return 0;
8482
8483 unregister_transport:
8484 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8485 release_srb_cache:
8486 kmem_cache_destroy(srb_cachep);
8487 no_srp_cache:
8488 return ret;
8489 }
8490
8491 static void __exit qla4xxx_module_exit(void)
8492 {
8493 pci_unregister_driver(&qla4xxx_pci_driver);
8494 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8495 kmem_cache_destroy(srb_cachep);
8496 }
8497
8498 module_init(qla4xxx_module_init);
8499 module_exit(qla4xxx_module_exit);
8500
8501 MODULE_AUTHOR("QLogic Corporation");
8502 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
8503 MODULE_LICENSE("GPL");
8504 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);