// SPDX-License-Identifier: GPL-2.0
/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pci.h>

#include <asm/chpid.h>
#include <asm/ebcdic.h>

#include "cio_debug.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
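/*
 * Note: chsc_page is a single DMA-capable page (allocated in chsc_init()
 * below) that is reused as the request/response block for most CHSC
 * commands in this file; chsc_page_lock serializes all of its users.
 */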
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
	case 0x0107:		/* "Channel busy" for the op 0x003d */
EXPORT_SYMBOL_GPL(chsc_error_from_response);
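/*
 * The helpers below all follow the same CHSC calling convention: fill in
 * request.length and request.code, issue the chsc instruction, map a
 * nonzero condition code to -ENODEV (cc 3) or -EBUSY, and otherwise hand
 * the response code to chsc_error_from_response().
 */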
struct chsc_ssd_area {
	struct chsc_header request;
	u16 f_sch;	/* first subchannel */
	u16 l_sch;	/* last subchannel */
	struct chsc_header response;
	u8 st : 3;	/* subchannel type */
	u8 unit_addr;	/* unit address */
	u16 devno;	/* device number */
	u16 sch;	/* subchannel */
	u8 chpid[8];	/* chpids 0-7 */
	u16 fla[8];	/* full link addresses 0-7 */
} __attribute__ ((packed));
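/*
 * chsc_ssd_area is the request/response block for the store-subchannel-
 * description (SSD) request (code 0x0004) issued by chsc_get_ssd_info()
 * below; request and response share the same page.
 */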
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
	struct chsc_ssd_area *ssd_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
	ret = chsc_error_from_response(ssd_area->response.code);
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
	if (!ssd_area->sch_valid) {
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	spin_unlock_irqrestore(&chsc_page_lock, flags);
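/*
 * On success, the caller's chsc_ssd_info describes one channel path per
 * bit set in path_mask: chpid[i] is valid when the bit for path i is set,
 * and fla[i] is additionally valid when the same bit is set in
 * fla_valid_mask.
 */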
/**
 * chsc_ssqd() - store subchannel QDIO data (SSQD)
 * @schid: id of the subchannel on which SSQD is performed
 * @ssqd: request and response block for SSQD
 *
 * Returns 0 on success.
 */
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
	memset(ssqd, 0, sizeof(*ssqd));
	ssqd->request.length = 0x0010;
	ssqd->request.code = 0x0024;
	ssqd->first_sch = schid.sch_no;
	ssqd->last_sch = schid.sch_no;
	ssqd->ssid = schid.ssid;

	return chsc_error_from_response(ssqd->response.code);
EXPORT_SYMBOL_GPL(chsc_ssqd);
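/*
 * Usage sketch (caller side, assumptions only): the caller owns the
 * request/response block, typically a zeroed page suitable for CHSC
 * (cf. the GFP_DMA allocations in chsc_init() below), e.g.
 *
 *	ssqd = (struct chsc_ssqd_area *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	rc = chsc_ssqd(schid, ssqd);
 *
 * and must check rc before consuming the response part of the block.
 */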
/**
 * chsc_sadc() - set adapter device controls (SADC)
 * @schid: id of the subchannel on which SADC is performed
 * @scssc: request and response block for SADC
 * @summary_indicator_addr: summary indicator address
 * @subchannel_indicator_addr: subchannel indicator address
 *
 * Returns 0 on success.
 */
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
	memset(scssc, 0, sizeof(*scssc));
	scssc->request.length = 0x0fe0;
	scssc->request.code = 0x0021;
	scssc->operation_code = 0;

	scssc->summary_indicator_addr = summary_indicator_addr;
	scssc->subchannel_indicator_addr = subchannel_indicator_addr;

	scssc->ks = PAGE_DEFAULT_KEY >> 4;
	scssc->kc = PAGE_DEFAULT_KEY >> 4;
	scssc->isc = QDIO_AIRQ_ISC;
	scssc->schid = schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc->word_with_d_bit = 0x10000000;

	return chsc_error_from_response(scssc->response.code);
EXPORT_SYMBOL_GPL(chsc_sadc);
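/*
 * Note: ks and kc are storage-key fields set from PAGE_DEFAULT_KEY, isc
 * selects the QDIO adapter-interruption subclass, and the d bit is only
 * set when the time-delay-disablement facility is present according to
 * css_general_characteristics (see the aif_tdd check above).
 */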
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
	spin_unlock_irq(sch->lock);

	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);

void chsc_chp_offline(struct chp_id chpid)
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
	memset(&link, 0, sizeof(struct chp_link));

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();

	mutex_lock(&chp->lock);
	chp_update_desc(chp);
	mutex_unlock(&chp->lock);

	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
static int __s390_process_res_acc(struct subchannel *sch, void *data)
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

static void s390_process_res_acc(struct chp_link *link)
	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
	css_schedule_reprobe();
struct chsc_sei_nt0_area {
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */

struct chsc_sei_nt2_area {
	u8  flags;	/* p and v bit */
	u8  cc;		/* content code */
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)
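/*
 * The ntsm bits use the architecture's big-endian bit numbering: bit 0
 * (the MSB of the u64) requests notification type 0, bit 2 requests
 * notification type 2. chsc_process_crw() below passes
 * CHSC_SEI_NT0 | CHSC_SEI_NT2 so a single store-event-information call
 * covers both notification types.
 */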
	struct chsc_header request;
	u64 ntsm;	/* notification type mask */
	struct chsc_header response;
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
/*
 * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
 */
#define ND_VALIDITY_VALID		0
#define ND_VALIDITY_OUTDATED		1
#define ND_VALIDITY_INVALID		2

struct node_descriptor {

	/* Node parameters. */

	char manufacturer[3];

/*
 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
 */
#define LIR_IQ_CLASS_INFO		0
#define LIR_IQ_CLASS_DEGRADED		1
#define LIR_IQ_CLASS_NOT_OPERATIONAL	2

	struct node_descriptor incident_node;
	struct node_descriptor attached_node;

#define PARAMS_LEN	10	/* PARAMS=xx,xxxxxx */
#define NODEID_LEN	35	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
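/*
 * Both lengths include the terminating NUL: "xx,xxxxxx" is 9 characters
 * and the NODEID pattern above is 34, matching the memset/snprintf usage
 * in format_node_data() below.
 */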
/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
static char *store_ebcdic(char *dest, const char *src, unsigned long len,
			  char delim)
	memcpy(dest, src, len);

/* Format node ID and parameters for output in LIR log message. */
static void format_node_data(char *params, char *id, struct node_descriptor *nd)
	memset(params, 0, PARAMS_LEN);
	memset(id, 0, NODEID_LEN);

	if (nd->validity != ND_VALIDITY_VALID) {
		strncpy(params, "n/a", PARAMS_LEN - 1);
		strncpy(id, "n/a", NODEID_LEN - 1);

	/* PARAMS=xx,xxxxxx */
	snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
	id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
	id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
	id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
	id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
	id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
	sprintf(id, "%04X", nd->tag);
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
	struct lir *lir = (struct lir *) &sei_area->ccdf;
	char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
	     aunodeid[NODEID_LEN];

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
		      sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);

	/* Ignore NULL Link Incident Records. */

	/* Inform user that a link requires maintenance actions because it has
	 * become degraded or not operational. Note that this log message is
	 * the primary intention behind a Link Incident Record. */

	format_node_data(iuparams, iunodeid, &lir->incident_node);
	format_node_data(auparams, aunodeid, &lir->attached_node);

	switch (lir->iq.class) {
	case LIR_IQ_CLASS_DEGRADED:
		pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
			"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
			sei_area->rs, sei_area->rsid, lir->ic, iuparams,
			iunodeid, auparams, aunodeid);
	case LIR_IQ_CLASS_NOT_OPERATIONAL:
		pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
		       "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
		       sei_area->rs, sei_area->rsid, lir->ic, iuparams,
		       iunodeid, auparams, aunodeid);
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
	struct chp_link link;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);

	memset(&link, 0, sizeof(struct chp_link));

	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;

	s390_process_res_acc(&link);
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
	struct channel_path *chp;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
	data = sei_area->ccdf;

	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);

		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);

struct chp_config_data {

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
	struct chp_config_data *data;

	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
	data = (struct chp_config_data *) &(sei_area->ccdf);

	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))

		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
			chp_cfg_schedule(chpid, 1);
			chp_cfg_schedule(chpid, 0);
			chp_cfg_cancel_deconfigure(chpid);
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)

	ret = scm_update_information();
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)

	ret = scm_process_availability_information();
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
	switch (sei_area->cc) {
		zpci_event_error(sei_area->ccdf);
		zpci_event_availability(sei_area->ccdf);
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
	static int ntsm_unsupported;

	memset(sei, 0, sizeof(*sei));
	sei->request.length = 0x0010;
	sei->request.code = 0x000e;
	if (!ntsm_unsupported)

	if (sei->response.code != 0x0001) {
		CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
			      sei->response.code, sei->ntsm);

		if (sei->response.code == 3 && sei->ntsm) {
			/* Fallback for old firmware. */
			ntsm_unsupported = 1;

	CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		chsc_process_sei_nt0(&sei->u.nt0_area);
		chsc_process_sei_nt2(&sei->u.nt2_area);
		CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);

	if (!(sei->u.nt0_area.flags & 0x80))
/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through the machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
	struct chsc_sei *sei = sei_page;

		css_schedule_eval_all();

	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
void chsc_chp_online(struct chp_id chpid)
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));

		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();

		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);

		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
		css_schedule_reprobe();
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));

	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   NULL, &chpid);
		css_schedule_reprobe();

		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
	for (i = 0; i <= __MAX_CHPID; i++) {
		chp_remove_cmg_attr(css->chps[i]);

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
	for (i = 0; i <= __MAX_CHPID; i++) {
		ret = chp_add_cmg_attr(css->chps[i]);

	for (--i; i >= 0; i--) {
		chp_remove_cmg_attr(css->chps[i]);
int __chsc_do_secm(struct channel_subsystem *css, int enable)
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		struct chsc_header response;
	} __attribute__ ((packed)) *secm_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
		ret = chsc_error_from_response(secm_area->response.code);

		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	spin_unlock_irqrestore(&chsc_page_lock, flags);
int
chsc_secm(struct channel_subsystem *css, int enable)
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);

	ret = __chsc_do_secm(css, enable);
		css->cm_enabled = enable;
	if (css->cm_enabled) {
		ret = chsc_add_cmg_attr(css);
			__chsc_do_secm(css, 0);

		chsc_remove_cmg_attr(css);
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
	struct chsc_scpd *scpd_area;

	if ((rfmt == 1 || rfmt == 0) && c == 1 &&
	    !css_general_characteristics.fcs)
	if ((rfmt == 2) && !css_general_characteristics.cib)

	memset(page, 0, PAGE_SIZE);
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
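/*
 * The two wrappers below request the format-0 and format-1 channel-path
 * description respectively; both reuse the shared chsc_page under
 * chsc_page_lock and copy the returned data into the caller's buffer.
 */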
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
	struct chsc_scpd *scpd_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);

	memcpy(desc, scpd_area->data, sizeof(*desc));

	spin_unlock_irqrestore(&chsc_page_lock, flags);

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
	struct chsc_scpd *scpd_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area);

	memcpy(desc, scpd_area->data, sizeof(*desc));

	spin_unlock_irqrestore(&chsc_page_lock, flags);
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
	/*
	 * cmcv flags which of the measurement-characteristic values are
	 * valid; bit (i + 3) guards entry i, invalid entries are zeroed.
	 */
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			chp->cmg_chars.values[i] = chars->values[i];
		else
			chp->cmg_chars.values[i] = 0;
int chsc_get_channel_measurement_chars(struct channel_path *chp)
	struct {
		struct chsc_header request;
		u32 first_chpid : 8;
		struct chsc_header response;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scmc_area->response.code);
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);

	if (scmc_area->not_valid)

	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);

	spin_unlock_irqrestore(&chsc_page_lock, flags);
int __init chsc_init(void)
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {

	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);

	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);

void __init chsc_init_cleanup(void)
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
		ret = (ret == 3) ? -ENODEV : -EBUSY;

	switch (sda_area->response.code) {
		ret = chsc_error_from_response(sda_area->response.code);

int chsc_enable_facility(int operation_code)
	struct chsc_sda_area *sda_area;
	unsigned long flags;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;

	ret = __chsc_enable_facility(sda_area, operation_code);
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);

	spin_unlock_irqrestore(&chsc_page_lock, flags);
int __init chsc_get_cssid(int idx)
	struct {
		struct chsc_header request;
		struct chsc_header response;
	} __packed *sdcal_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	sdcal_area = chsc_page;
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = 4;

	ret = chsc(sdcal_area);
		ret = (ret == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(sdcal_area->response.code);
		CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
			      sdcal_area->response.code);

	if ((addr_t) &sdcal_area->list[idx] <
	    (addr_t) &sdcal_area->response + sdcal_area->response.length)
		ret = sdcal_area->list[idx].cssid;

	spin_unlock_irq(&chsc_page_lock);
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
	unsigned long flags;
	struct {
		struct chsc_header request;
		struct chsc_header response;
		u32 general_char[510];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
		result = (result == 3) ? -ENODEV : -EBUSY;

	result = chsc_error_from_response(scsc_area->response.code);
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);

	spin_unlock_irqrestore(&chsc_page_lock, flags);

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
	struct {
		struct chsc_header request;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[3];
		u64 clock_delta;
		unsigned int rsvd4[2];
	} __attribute__ ((packed)) *rr;
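	/*
	 * rr is the request/response block for this call, laid over the
	 * caller-provided page; on a successful response (code 0x0001) the
	 * reported clock delta is passed back through *clock_delta.
	 */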
	memset(page, 0, PAGE_SIZE);
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;

	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
		*clock_delta = rr->clock_delta;

int chsc_sstpi(void *page, void *result, size_t size)
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
	} __attribute__ ((packed)) *rr;

	memset(page, 0, PAGE_SIZE);
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;

	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
int chsc_siosl(struct subchannel_id schid)
	struct {
		struct chsc_header request;
		struct subchannel_id sid;
		struct chsc_header response;
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);

		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);

	rc = chsc_error_from_response(siosl_area->response.code);
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);

	spin_unlock_irqrestore(&chsc_page_lock, flags);
EXPORT_SYMBOL_GPL(chsc_siosl);
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scm_area->response.code);
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
EXPORT_SYMBOL_GPL(chsc_scm_info);
/**
 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
 * @schid: id of the subchannel on which PNSO is performed
 * @brinfo_area: request and response block for the operation
 * @resume_token: resume token for multiblock response
 * @cnc: Boolean change-notification control
 *
 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
 *
 * Returns 0 on success.
 */
int chsc_pnso_brinfo(struct subchannel_id schid,
		     struct chsc_pnso_area *brinfo_area,
		     struct chsc_brinfo_resume_token resume_token,
		     int cnc)
	memset(brinfo_area, 0, sizeof(*brinfo_area));
	brinfo_area->request.length = 0x0030;
	brinfo_area->request.code = 0x003d; /* network-subchannel operation */
	brinfo_area->m	   = schid.m;
	brinfo_area->ssid  = schid.ssid;
	brinfo_area->sch   = schid.sch_no;
	brinfo_area->cssid = schid.cssid;
	brinfo_area->oc	   = 0; /* Store-network-bridging-information list */
	brinfo_area->resume_token = resume_token;
	brinfo_area->n	   = (cnc != 0);
	if (chsc(brinfo_area))
	return chsc_error_from_response(brinfo_area->response.code);
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
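/*
 * Usage sketch (caller side, assumptions only): allocate the block as the
 * kernel-doc above requires and start from an all-zero resume token, e.g.
 *
 *	area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
 *	memset(&token, 0, sizeof(token));
 *	rc = chsc_pnso_brinfo(schid, area, token, 0);
 *
 * For a multiblock response a caller would presumably reissue the call
 * with the resume token taken from the previous response.
 */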