s390/sclp: unify basic sclp access by exposing "struct sclp"
drivers/s390/char/sclp_early.c
/*
 * SCLP early driver
 *
 * Copyright IBM Corp. 2013
 */

#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO         0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED  0x00120001

struct read_info_sccb {
        struct sccb_header header;      /* 0-7 */
        u16 rnmax;                      /* 8-9 */
        u8 rnsize;                      /* 10 */
        u8 _pad_11[16 - 11];            /* 11-15 */
        u16 ncpurl;                     /* 16-17 */
        u16 cpuoff;                     /* 18-19 */
        u8 _pad_20[24 - 20];            /* 20-23 */
        u8 loadparm[8];                 /* 24-31 */
        u8 _pad_32[42 - 32];            /* 32-41 */
        u8 fac42;                       /* 42 */
        u8 fac43;                       /* 43 */
        u8 _pad_44[48 - 44];            /* 44-47 */
        u64 facilities;                 /* 48-55 */
        u8 _pad_56[66 - 56];            /* 56-65 */
        u8 fac66;                       /* 66 */
        u8 _pad_67[76 - 67];            /* 67-75 */
        u32 ibc;                        /* 76-79 */
        u8 _pad80[84 - 80];             /* 80-83 */
        u8 fac84;                       /* 84 */
        u8 fac85;                       /* 85 */
        u8 _pad_86[91 - 86];            /* 86-90 */
        u8 flags;                       /* 91 */
        u8 _pad_92[100 - 92];           /* 92-99 */
        u32 rnsize2;                    /* 100-103 */
        u64 rnmax2;                     /* 104-111 */
        u8 _pad_112[120 - 112];         /* 112-119 */
        u16 hcpua;                      /* 120-121 */
        u8 _pad_122[4096 - 122];        /* 122-4095 */
} __packed __aligned(PAGE_SIZE);

static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static struct sclp_ipl_info sclp_ipl_info;
static unsigned int sclp_mtid_max;

struct sclp_info sclp;
EXPORT_SYMBOL(sclp);
u64 sclp_facilities;

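/*
 * Issue a single SCLP command and wait for its completion: enable the
 * service-signal interruption subclass (control register 0, bit 9),
 * load an enabled wait PSW until the signal arrives, and disable the
 * subclass again before returning.
 */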
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
        int rc;

        __ctl_set_bit(0, 9);
        rc = sclp_service_call(cmd, sccb);
        if (rc)
                goto out;
        __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
                        PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
        local_irq_disable();
out:
        /* Contents of the sccb might have changed. */
        barrier();
        __ctl_clear_bit(0, 9);
        return rc;
}

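/*
 * Read the SCP information block. The forced variant of READ SCP INFO
 * is tried first; if it is rejected as invalid, fall back to the plain
 * command. Each command is retried while the SCLP interface is busy.
 */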
static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
        int rc, i;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};

        for (i = 0; i < ARRAY_SIZE(commands); i++) {
                do {
                        memset(sccb, 0, sizeof(*sccb));
                        sccb->header.length = sizeof(*sccb);
                        sccb->header.function_code = 0x80;
                        sccb->header.control_mask[2] = 0x80;
                        rc = sclp_cmd_sync_early(commands[i], sccb);
                } while (rc == -EBUSY);

                if (rc)
                        break;
                if (sccb->header.response_code == 0x10)
                        return 0;
                if (sccb->header.response_code != 0x1f0)
                        break;
        }
        return -EIO;
}

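/*
 * Fill the global sclp structure from the READ SCP INFO response:
 * facility bits, storage increment size and maximum, IBC value, CPU
 * limits, SIIF/SIGPIF support of the boot CPU, IPL information and
 * the multi-threading ids.
 */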
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
        struct sclp_cpu_entry *cpue;
        u16 boot_cpu_address, cpu;

        if (sclp_read_info_early(sccb))
                return;

        sclp_facilities = sccb->facilities;
        sclp.has_sprp = !!(sccb->fac84 & 0x02);
        sclp.has_cpu_type = !!(sccb->fac84 & 0x01);
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp.rzm <<= 20;
        sclp.ibc = sccb->ibc;

        if (!sccb->hcpua) {
                if (MACHINE_IS_VM)
                        sclp.max_cpu = 64;
                else
                        sclp.max_cpu = sccb->ncpurl;
        } else {
                sclp.max_cpu = sccb->hcpua + 1;
        }

        boot_cpu_address = stap();
        cpue = (void *)sccb + sccb->cpuoff;
        for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
                if (boot_cpu_address != cpue->core_id)
                        continue;
                sclp.has_siif = cpue->siif;
                sclp.has_sigpif = cpue->sigpif;
                break;
        }

        /* Save IPL information */
        sclp_ipl_info.is_valid = 1;
        if (sccb->flags & 0x2)
                sclp_ipl_info.has_dump = 1;
        memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);

        sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
        sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
        sclp_mtid_max = max(sclp.mtid, sclp.mtid_cp);
        sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
}

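/*
 * Return the maximum thread id for the given CPU type: sclp.mtid_cp
 * for cpu_type zero, sclp.mtid for all other types.
 */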
unsigned int sclp_get_mtid(u8 cpu_type)
{
        return cpu_type ? sclp.mtid : sclp.mtid_cp;
}

unsigned int sclp_get_mtid_max(void)
{
        return sclp_mtid_max;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. The sclp_facilities_detect() function retrieves
 * and saves the IPL information.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
        *info = sclp_ipl_info;
}

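/*
 * Execute an SCLP command, retrying while the interface is busy, and
 * treat any response code other than 0x0020 as an error.
 */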
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
        int rc;

        do {
                rc = sclp_cmd_sync_early(cmd, sccb);
        } while (rc == -EBUSY);

        if (rc)
                return -EIO;
        if (((struct sccb_header *) sccb)->response_code != 0x0020)
                return -EIO;
        return 0;
}

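/*
 * Prepare an SDIAS event buffer that requests the number of HSA
 * blocks (event qualifier SDIAS_EQ_SIZE, data id SDIAS_DI_FCP_DUMP).
 */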
static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
        memset(sccb, 0, sizeof(*sccb));

        sccb->hdr.length = sizeof(*sccb);
        sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
        sccb->evbuf.hdr.type = EVTYP_SDIAS;
        sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
        sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
        sccb->evbuf.event_id = 4712;
        sccb->evbuf.dbs = 1;
}

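/* Set the SCLP receive and send event masks via WRITE EVENT MASK. */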
static int __init sclp_set_event_mask(struct init_sccb *sccb,
                                      unsigned long receive_mask,
                                      unsigned long send_mask)
{
        memset(sccb, 0, sizeof(*sccb));
        sccb->header.length = sizeof(*sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}

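/*
 * Send an SDIAS size request and convert the returned block count to
 * the HSA size in bytes.
 */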
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
        sccb_init_eq_size(sccb);
        if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
                return -EIO;
        if (sccb->evbuf.blk_cnt == 0)
                return 0;
        return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

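/*
 * Pick up the asynchronous SDIAS response via READ EVENT DATA and
 * convert the reported block count to the HSA size in bytes.
 */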
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
        memset(sccb, 0, PAGE_SIZE);
        sccb->length = PAGE_SIZE;
        if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
                return -EIO;
        if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
                return 0;
        return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

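/*
 * Determine the HSA size: first via the synchronous SDIAS interface
 * (LPAR), then via the asynchronous interface (z/VM) if the
 * synchronous request did not return a size.
 */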
static void __init sclp_hsa_size_detect(void *sccb)
{
        long size;

        /* First try synchronous interface (LPAR) */
        if (sclp_set_event_mask(sccb, 0, 0x40000010))
                return;
        size = sclp_hsa_size_init(sccb);
        if (size < 0)
                return;
        if (size != 0)
                goto out;
        /* Then try asynchronous interface (z/VM) */
        if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
                return;
        size = sclp_hsa_size_init(sccb);
        if (size < 0)
                return;
        size = sclp_hsa_copy_wait(sccb);
        if (size < 0)
                return;
out:
        sclp.hsa_size = size;
}

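/*
 * Line-mode console support requires EVTYP_OPCMD in the SCLP send mask
 * and EVTYP_MSG or EVTYP_PMSGCMD in the SCLP receive mask.
 */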
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
        if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
                return 0;
        if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
                return 0;
        return 1;
}

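/*
 * Derive the available console types (VT220 and/or line mode) from
 * the SCLP masks saved in the sccb.
 */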
static void __init sclp_console_detect(struct init_sccb *sccb)
{
        if (sccb->header.response_code != 0x20)
                return;

        if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
                sclp.has_vt220 = 1;

        if (sclp_con_check_linemode(sccb))
                sclp.has_linemode = 1;
}

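/*
 * Early boot entry point: detect the basic SCLP facilities, the HSA
 * size and the available console types.
 */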
void __init sclp_early_detect(void)
{
        void *sccb = &sccb_early;

        sclp_facilities_detect(sccb);
        sclp_hsa_size_detect(sccb);

        /* Turn off SCLP event notifications.  Also save remote masks in the
         * sccb.  These are sufficient to detect sclp console capabilities.
         */
        sclp_set_event_mask(sccb, 0, 0);
        sclp_console_detect(sccb);
}