// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <asm/ctlreg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>
#include <asm/page-states.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

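/*
 * Typical calling pattern in this file (illustration only): allocate a
 * zeroed, DMA-capable SCCB, set the header length, issue the command and
 * release the page again:
 *
 *	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	if (!sccb)
 *		return -ENOMEM;
 *	sccb->header.length = PAGE_SIZE;
 *	rc = sclp_sync_request(cmd, sccb);
 *	...
 *	free_page((unsigned long) sccb);
 */
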
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

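/*
 * The target core number is encoded in bits 8-15 of the command word (see
 * sclp_core_configure() below). A worked example, for illustration only:
 * configuring core 5 yields SCLP_CMDW_CONFIGURE_CPU | 5 << 8 == 0x00110501.
 */
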
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

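/*
 * Increment numbers are 1-based, so rn 1 maps to address 0. For example
 * (illustration only, sclp.rzm is machine dependent), with a 256MB
 * increment size rn 2 maps to (2 - 1) * 0x10000000 == 0x10000000.
 */
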
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

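/*
 * Each non-zero entry above describes one assigned storage increment with
 * the increment number in the upper 16 bits, hence the ">> 16". For
 * illustration only: an entry of 0x002a0000 denotes rn 0x2a.
 */
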
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

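/*
 * The two range checks above form a standard interval overlap test: an
 * increment [istart, istart + sclp.rzm) is only (un)assigned if it
 * intersects [start, start + size). Because the list is sorted by rn,
 * the loop may stop at the first increment starting past the range.
 */
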
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * We do not allow memory blocks to go offline if they contain
		 * standby memory. This is done to simplify the "memory online"
		 * case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_PREPARE_ONLINE:
		/*
		 * The altmap_start_pfn and altmap_nr_pages fields in struct
		 * memory_notify are only valid for the MEM_PREPARE_ONLINE
		 * and MEM_FINISH_OFFLINE notifiers.
		 *
		 * When the altmap is in use, take the specified memory range
		 * online, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		rc = sclp_mem_change_state(start, size, 1);
		if (rc || !arg->altmap_nr_pages)
			break;
		/*
		 * Set CMMA state to nodat here, since the struct page memory
		 * at the beginning of the memory block will not go through the
		 * buddy allocator later.
		 */
		__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
		break;
	case MEM_FINISH_OFFLINE:
		/*
		 * When the altmap is in use, take the specified memory range
		 * offline, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

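/*
 * Altmap example (illustration only): for a 256MB memory block whose
 * memmap is allocated from the block itself, arg->start_pfn/nr_pages
 * describe only the usable remainder. With altmap_start_pfn == 0x10000
 * and altmap_nr_pages == 0x400, the adjustments above widen the range
 * back to the full [0x10000000, 0x20000000), so that whole storage
 * increments are assigned and unassigned.
 */
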
static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

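/*
 * Worked example (illustration only): with *start = 0x0ff00000,
 * *size = 0x20100000 and a 256MB block size, start_align becomes
 * 0x10000000 and size_align 0x20000000, i.e. "512M of 513M usable".
 */
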
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size,
			   MACHINE_HAS_EDAT1 ?
			   MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

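/*
 * add_memory_merged() coalesces runs of consecutive standby increments
 * and only adds a run once a non-consecutive rn arrives; the final call
 * with rn 0 flushes the last pending run. For example, standby
 * increments 5, 6 and 7 are added as a single region of 3 * sclp.rzm
 * bytes.
 */
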
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

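/*
 * For standby increments (assigned == 0) the loop above looks for the
 * first gap in the rn sequence and fills it: with increments 1 and 3 on
 * the list, for example, a new standby increment is inserted as rn 2.
 */
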
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (oldmem_data.start) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

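/*
 * Example call sequence (illustration only):
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x4f;
 *	rc = sclp_chp_configure(chpid);
 */
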
/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}

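/*
 * Example (illustration only; the masks are assumed to use the MSB-first
 * per-byte bit layout used elsewhere in the cio code): testing whether a
 * given chpid is currently configured:
 *
 *	if (info.configured[chpid.id >> 3] & (0x80 >> (chpid.id & 7)))
 *		...
 */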