// SPDX-License-Identifier: GPL-2.0
/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
#include <asm/ap.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
	case 0x0107:		/* "Channel busy" for the op 0x003d */
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
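/*
 * Calling pattern used throughout this file (illustrative sketch with a
 * hypothetical command block): the chsc() wrapper returns the instruction's
 * condition code; only when that is zero is the response code valid and
 * mapped to an errno value via chsc_error_from_response().
 */
#if 0
struct example_area {
	struct chsc_header request;
	u32 reserved[3];
	struct chsc_header response;
};

static int example_issue_chsc(struct example_area *area)
{
	int cc = chsc(area);

	if (cc > 0)	/* cc 3: facility not operational, else busy */
		return (cc == 3) ? -ENODEV : -EBUSY;
	return chsc_error_from_response(area->response.code);
}
#endif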
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __packed __aligned(PAGE_SIZE);
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	unsigned long flags;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
/**
 * chsc_ssqd() - store subchannel QDIO data (SSQD)
 * @schid: id of the subchannel on which SSQD is performed
 * @ssqd: request and response block for SSQD
 *
 * Returns 0 on success.
 */
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
	memset(ssqd, 0, sizeof(*ssqd));
	ssqd->request.length = 0x0010;
	ssqd->request.code = 0x0024;
	ssqd->first_sch = schid.sch_no;
	ssqd->last_sch = schid.sch_no;
	ssqd->ssid = schid.ssid;

	if (chsc(ssqd))
		return -EIO;

	return chsc_error_from_response(ssqd->response.code);
}
EXPORT_SYMBOL_GPL(chsc_ssqd);
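/*
 * Usage sketch (illustrative, hypothetical caller): SSQD expects a zeroed,
 * page-sized block below 2 GB, which is how the qdio layer allocates it.
 */
#if 0
static int example_query_ssqd(struct subchannel_id schid)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!ssqd)
		return -ENOMEM;
	rc = chsc_ssqd(schid, ssqd);
	/* on success, ssqd->qdio_ssqd describes the subchannel's QDIO setup */
	free_page((unsigned long)ssqd);
	return rc;
}
#endif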
/**
 * chsc_sadc() - set adapter device controls (SADC)
 * @schid: id of the subchannel on which SADC is performed
 * @scssc: request and response block for SADC
 * @summary_indicator_addr: summary indicator address
 * @subchannel_indicator_addr: subchannel indicator address
 *
 * Returns 0 on success.
 */
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
{
	memset(scssc, 0, sizeof(*scssc));
	scssc->request.length = 0x0fe0;
	scssc->request.code = 0x0021;
	scssc->operation_code = 0;

	scssc->summary_indicator_addr = summary_indicator_addr;
	scssc->subchannel_indicator_addr = subchannel_indicator_addr;

	scssc->ks = PAGE_DEFAULT_KEY >> 4;
	scssc->kc = PAGE_DEFAULT_KEY >> 4;
	scssc->isc = QDIO_AIRQ_ISC;
	scssc->schid = schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc->word_with_d_bit = 0x10000000;

	if (chsc(scssc))
		return -EIO;

	return chsc_error_from_response(scssc->response.code);
}
EXPORT_SYMBOL_GPL(chsc_sadc);
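/*
 * Usage sketch (illustrative, hypothetical caller): qdio uses SADC during
 * adapter-interrupt setup; the indicator addresses point at the summary and
 * subchannel indicator bytes the hardware should update.
 */
#if 0
static int example_set_indicators(struct subchannel_id schid,
				  u64 summary_addr, u64 subch_addr)
{
	struct chsc_scssc_area *scssc;
	int rc;

	scssc = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scssc)
		return -ENOMEM;
	rc = chsc_sadc(schid, scssc, summary_addr, subch_addr);
	free_page((unsigned long)scssc);
	return rc;
}
#endif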
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
void chsc_chp_offline(struct chp_id chpid)
{
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();

	mutex_lock(&chp->lock);
	chp_update_desc(chp);
	mutex_unlock(&chp->lock);

	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}
static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
	css_schedule_reprobe();
}
struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;
struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;
#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed __aligned(PAGE_SIZE);
/*
 * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
 */

#define ND_VALIDITY_VALID	0
#define ND_VALIDITY_OUTDATED	1
#define ND_VALIDITY_INVALID	2

struct node_descriptor {
	/* Flags. */
	union {
		struct {
			u32 validity:3;
			u32 reserved:5;
		} __packed;
		u8 byte0;
	} __packed;

	/* Node parameters. */
	u32 params:24;

	/* Node ID. */
	char type[6];
	char model[3];
	char manufacturer[3];
	char plant[2];
	char seq[12];
	u16 tag;
} __packed;
/*
 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
 */

#define LIR_IQ_CLASS_INFO		0
#define LIR_IQ_CLASS_DEGRADED		1
#define LIR_IQ_CLASS_NOT_OPERATIONAL	2

struct lir {
	struct {
		u32 null:1;
		u32 reserved:3;
		u32 class:2;
		u32 reserved2:2;
	} __packed iq;
	u32 ic:8;
	u32 reserved:16;
	struct node_descriptor incident_node;
	struct node_descriptor attached_node;
	u8 reserved2[32];
} __packed;

#define PARAMS_LEN	10	/* PARAMS=xx,xxxxxx */
#define NODEID_LEN	35	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
static char *store_ebcdic(char *dest, const char *src, unsigned long len,
			  char delim)
{
	memcpy(dest, src, len);
	EBCASC(dest, len);

	if (delim)
		dest[len++] = delim;

	return dest + len;
}
/* Format node ID and parameters for output in LIR log message. */
static void format_node_data(char *params, char *id, struct node_descriptor *nd)
{
	memset(params, 0, PARAMS_LEN);
	memset(id, 0, NODEID_LEN);

	if (nd->validity != ND_VALIDITY_VALID) {
		strncpy(params, "n/a", PARAMS_LEN - 1);
		strncpy(id, "n/a", NODEID_LEN - 1);
		return;
	}

	/* PARAMS=xx,xxxxxx */
	snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
	id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
	id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
	id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
	id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
	id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
	sprintf(id, "%04X", nd->tag);
}
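/*
 * For a valid descriptor this produces, for example (hypothetical values):
 * PARAMS=80,d0a010 and NODEID=002107/961,IBM.02000000012345,1234
 * i.e. type/model, manufacturer.plant+sequence, and the two-byte tag.
 */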
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct lir *lir = (struct lir *) &sei_area->ccdf;
	char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
	     aunodeid[NODEID_LEN];

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
		      sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);

	/* Ignore NULL Link Incident Records. */
	if (lir->iq.null)
		return;

	/* Inform user that a link requires maintenance actions because it has
	 * become degraded or not operational. Note that this log message is
	 * the primary intention behind a Link Incident Record. */

	format_node_data(iuparams, iunodeid, &lir->incident_node);
	format_node_data(auparams, aunodeid, &lir->attached_node);

	switch (lir->iq.class) {
	case LIR_IQ_CLASS_DEGRADED:
		pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
			"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
			sei_area->rs, sei_area->rsid, lir->ic, iuparams,
			iunodeid, auparams, aunodeid);
		break;
	case LIR_IQ_CLASS_NOT_OPERATIONAL:
		pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
		       "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
		       sei_area->rs, sei_area->rsid, lir->ic, iuparams,
		       iunodeid, auparams, aunodeid);
		break;
	default:
		break;
	}
}
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (!status)
		return;

	if (status < 0) {
		chp_new(chpid);
	} else {
		chp = chpid_to_chp(chpid);
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_process_availability_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);
}
static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
{
	CIO_CRW_EVENT(3, "chsc: ap config changed\n");
	if (sei_area->rs != 5)
		return;

	ap_bus_cfg_chg();
}
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 3: /* ap config changed */
		chsc_process_sei_ap_cfg_chg(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	static int ntsm_unsupported;

	while (true) {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		if (!ntsm_unsupported)
			sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
				      sei->response.code, sei->ntsm);

			if (sei->response.code == 3 && sei->ntsm) {
				/* Fallback for old firmware. */
				ntsm_unsupported = 1;
				continue;
			}
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}

		if (!(sei->u.nt0_area.flags & 0x80))
			break;
	}
}
/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
{
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();

		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);

		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
		css_schedule_reprobe();
	}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   NULL, &chpid);
		css_schedule_reprobe();
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	unsigned long flags;
	int ret, ccode;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1 || rfmt == 0) && c == 1 &&
	    !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	if ((rfmt == 3) && !css_general_characteristics.util_str)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
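/*
 * Usage sketch (illustrative, hypothetical caller): the caller supplies a
 * zeroed page that receives the response block; fmt/rfmt select the
 * description format.
 */
#if 0
static int example_store_chp_desc(struct chp_id chpid, void *page)
{
	/* fmt=0, rfmt=0, c=0, m=0: plain format-0 description */
	return chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, page);
}
#endif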
#define chsc_det_chp_desc(FMT, c)					\
int chsc_determine_fmt##FMT##_channel_path_desc(			\
	struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc)	\
{									\
	struct chsc_scpd *scpd_area;					\
	unsigned long flags;						\
	int ret;							\
									\
	spin_lock_irqsave(&chsc_page_lock, flags);			\
	scpd_area = chsc_page;						\
	ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0,	\
					       scpd_area);		\
	if (ret)							\
		goto out;						\
									\
	memcpy(desc, scpd_area->data, sizeof(*desc));			\
out:									\
	spin_unlock_irqrestore(&chsc_page_lock, flags);			\
	return ret;							\
}

chsc_det_chp_desc(0, 0)
chsc_det_chp_desc(1, 1)
chsc_det_chp_desc(3, 0)
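/*
 * Each chsc_det_chp_desc() line above expands to a complete wrapper; for
 * example, chsc_det_chp_desc(0, 0) defines
 *   int chsc_determine_fmt0_channel_path_desc(struct chp_id chpid,
 *			struct channel_path_desc_fmt0 *desc)
 * which issues the request through the shared chsc_page and copies the
 * format-0 description into *desc.
 */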
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	int i, mask;

	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			chp->cmg_chars.values[i] = chars->values[i];
		else
			chp->cmg_chars.values[i] = 0;
	}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	unsigned long flags;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->shared = -1;
	chp->cmg = -1;

	if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
		return -EINVAL;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid)
		goto out;

	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}
int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
{
	int ret;

	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
out:
	return ret;
}
int chsc_enable_facility(int operation_code)
{
	struct chsc_sda_area *sda_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;

	ret = __chsc_enable_facility(sda_area, operation_code);
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);

	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
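/*
 * Usage sketch (illustrative): css init enables facilities this way, e.g.
 * multiple subchannel sets via the CHSC_SDA_OC_MSS operation code from
 * chsc.h.
 */
#if 0
static int example_enable_mss(void)
{
	return chsc_enable_facility(CHSC_SDA_OC_MSS);
}
#endif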
int __init chsc_get_cssid(int idx)
{
	struct {
		struct chsc_header request;
		u8 atype;
		u32 : 24;
		u32 reserved1[6];
		struct chsc_header response;
		u32 reserved2[3];
		struct {
			u8 cssid;
			u32 : 24;
		} list[0];
	} __packed *sdcal_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	sdcal_area = chsc_page;
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = 4;

	ret = chsc(sdcal_area);
	if (ret) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	ret = chsc_error_from_response(sdcal_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
			      sdcal_area->response.code);
		goto exit;
	}

	if ((addr_t) &sdcal_area->list[idx] <
	    (addr_t) &sdcal_area->response + sdcal_area->response.length)
		ret = sdcal_area->list[idx].cssid;
	else
		ret = -ENODEV;

exit:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	unsigned long flags;
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
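/*
 * The exported characteristics are plain bitfield structs; consumers simply
 * test individual facility bits, e.g. (illustrative, field per
 * css_general_char):
 */
#if 0
static bool example_has_mcss(void)
{
	return css_general_characteristics.mcss != 0;
}
#endif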
int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[3];
		u64 clock_delta;
		unsigned int rsvd4[2];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	if (clock_delta)
		*clock_delta = rr->clock_delta;
	return rc;
}
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
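/*
 * Usage sketch (illustrative, hypothetical caller): a driver can trigger
 * I/O-operation logging for its subchannel and merely log a failure.
 */
#if 0
static void example_trigger_logging(struct subchannel *sch)
{
	if (chsc_siosl(sch->schid))
		pr_warn("logging not activated for 0.%x.%04x\n",
			sch->schid.ssid, sch->schid.sch_no);
}
#endif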
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
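/*
 * Usage sketch (illustrative): SSI responses can be continued; the restok
 * token from one response is passed as the request token of the next call
 * until it reads zero. Field names follow struct chsc_scm_info.
 */
#if 0
static int example_walk_scm_info(struct chsc_scm_info *scm_area)
{
	u64 token = 0;
	int ret;

	do {
		ret = chsc_scm_info(scm_area, token);
		if (ret)
			return ret;
		/* consume scm_area->scmal[] entries here */
		token = scm_area->restok;
	} while (token);
	return 0;
}
#endif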
/**
 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
 * @schid: id of the subchannel on which PNSO is performed
 * @brinfo_area: request and response block for the operation
 * @resume_token: resume token for multiblock response
 * @cnc: Boolean change-notification control
 *
 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
 *
 * Returns 0 on success.
 */
int chsc_pnso_brinfo(struct subchannel_id schid,
		     struct chsc_pnso_area *brinfo_area,
		     struct chsc_brinfo_resume_token resume_token,
		     int cnc)
{
	memset(brinfo_area, 0, sizeof(*brinfo_area));
	brinfo_area->request.length = 0x0030;
	brinfo_area->request.code = 0x003d; /* network-subchannel operation */
	brinfo_area->m	   = schid.m;
	brinfo_area->ssid  = schid.ssid;
	brinfo_area->sch   = schid.sch_no;
	brinfo_area->cssid = schid.cssid;
	brinfo_area->oc    = 0; /* Store-network-bridging-information list */
	brinfo_area->resume_token = resume_token;
	brinfo_area->n	   = (cnc != 0);
	if (chsc(brinfo_area))
		return -EIO;
	return chsc_error_from_response(brinfo_area->response.code);
}
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
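/*
 * Usage sketch (illustrative, hypothetical caller): a zeroed resume token
 * starts a fresh listing; a token copied from a previous response resumes it.
 */
#if 0
static int example_brinfo(struct subchannel_id schid)
{
	struct chsc_brinfo_resume_token token = {};
	struct chsc_pnso_area *area;
	int rc;

	area = (void *)get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;
	rc = chsc_pnso_brinfo(schid, area, token, 0);
	free_page((unsigned long)area);
	return rc;
}
#endif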
int chsc_sgib(u32 origin)
{
	struct {
		struct chsc_header request;
		u16 op;
		u8  reserved01[2];
		u8  reserved02:4;
		u8  fmt:4;
		u8  reserved03[7];
		/* operation data area begin */
		u8  reserved04[4];
		u32 gib_origin;
		u8  reserved05[10];
		u8  aix;
		u8  reserved06[4029];
		struct chsc_header response;
		u8  reserved07[4];
	} *sgib_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	sgib_area = chsc_page;
	sgib_area->request.length = 0x0fe0;
	sgib_area->request.code = 0x0021;
	sgib_area->op = 0x1;
	sgib_area->gib_origin = origin;

	ret = chsc(sgib_area);
	if (ret == 0)
		ret = chsc_error_from_response(sgib_area->response.code);
	spin_unlock_irq(&chsc_page_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(chsc_sgib);