/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
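/*
 * Illustrative sketch (not itself part of this file's flow): callers
 * below typically issue a chsc request and funnel the response code
 * through chsc_error_from_response() rather than decoding it by hand:
 *
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	ret = chsc_error_from_response(area->response.code);
 */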
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
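/*
 * Usage sketch (illustrative only; the schid value is assumed to come
 * from subchannel scanning in the css code):
 *
 *	struct chsc_ssd_info ssd;
 *
 *	if (chsc_get_ssd_info(schid, &ssd) == 0)
 *		... ssd.path_mask, ssd.chpid[] and ssd.fla[] are valid ...
 */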
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}
static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}
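/*
 * Worked example (illustrative): for a non-NULL LIR whose first
 * incident-node descriptor word is 0xd0000045, the validity bits
 * (0xc0000000) and the node-type bit (0x10000000) are set, so the
 * function returns byte 3, i.e. CHPID 0x45.
 */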
struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u __packed;
} __packed;
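/*
 * Note (illustrative): ntsm is a left-to-right bit mask, hence the
 * shifts down from bit 63; requesting both supported notification
 * types at once gives
 *
 *	CHSC_SEI_NT0 | CHSC_SEI_NT2 == 0xa000000000000000ULL
 */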
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);
}
/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
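/*
 * Usage sketch (illustrative only): a sysfs toggle for channel
 * measurement would call chsc_secm(css, 1) to enable and
 * chsc_secm(css, 0) to disable; the function itself allocates and
 * frees the two cub pages as needed.
 */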
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}
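/*
 * Note (illustrative): chsc_init() is meant to run once during css
 * bootstrap, before any user of sei_page/chsc_page above;
 * chsc_init_cleanup() below undoes it (unregister the CRW handler,
 * then free both pages).
 */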
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}
int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
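/*
 * Usage sketch (illustrative only; CHSC_SDA_OC_MSS is assumed to be
 * the operation code for the multiple-subchannel-set facility as
 * defined in chsc.h):
 *
 *	if (chsc_enable_facility(CHSC_SDA_OC_MSS) == 0)
 *		... subchannel sets beyond set 0 may be used ...
 */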
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
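/*
 * Usage sketch (illustrative only): a driver that wants the hardware
 * to collect model-dependent logs for a misbehaving device can trigger
 * it via the subchannel id:
 *
 *	rc = chsc_siosl(sch->schid);
 */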
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
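/*
 * Usage sketch (illustrative only; the restok continuation field is an
 * assumption about struct chsc_scm_info from chsc.h):
 *
 *	u64 token = 0;
 *
 *	do {
 *		ret = chsc_scm_info(scm_area, token);
 *		if (ret)
 *			break;
 *		... process the returned SCM entries ...
 *		token = scm_area->restok;
 *	} while (token);
 */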