coreboot
coreboot is an Open Source project aimed at replacing the proprietary BIOS found in most computers.
smm_module_loader.c
/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi_gnvs.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <rmodule.h>
#include <cbmem.h>
#include <cpu/x86/smm.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <security/intel/stm/SmmStm.h>

#define FXSAVE_SIZE 512
#define SMM_CODE_SEGMENT_SIZE 0x10000
/* FXSAVE area during relocation. While it may not be strictly needed, the
   SMM stub code relies on the FXSAVE area being non-zero to enable SSE
   instructions within SMM mode. */
static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
__attribute__((aligned(16)));

/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into handler
 * 4. Handler - C-based SMM handler.
 *
 * The components are assumed to consist of one consecutive region.
 */

/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];

/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

/* Per-CPU entry point, code segment and save state bookkeeping for the map. */
struct cpu_smm_info {
        uint8_t active;
        uintptr_t smbase;
        uintptr_t entry;
        uintptr_t ss_start;
        uintptr_t code_start;
        uintptr_t code_end;
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };

/*
 * This method creates a map of all the CPU entry points, save state locations
 * and the beginning and end of code segments for each CPU. This map is used
 * during relocation to properly align as many CPUs as can fit into the SMRAM
 * region. For more information on how SMRAM works, refer to the latest Intel
 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
 * following regions:
 * +-----------------+ Top of SMRAM
 * |                 | <- MSEG, FXSAVE
 * +-----------------+
 * |     common      |
 * |   smi handler   | 64K
 * |                 |
 * +-----------------+
 * | CPU 0 code seg  |
 * +-----------------+
 * | CPU 1 code seg  |
 * +-----------------+
 * | CPU x code seg  |
 * +-----------------+
 * |                 |
 * |                 |
 * +-----------------+
 * |     stacks      |
 * +-----------------+ <- START of SMRAM
 *
 * The code below checks when a code segment is full and begins placing the
 * remaining CPUs in the lower segments. The entry point for each CPU is
 * smbase + 0x8000 and the save state is smbase + 0x8000 + (0x8000 - state
 * save size). The save state area grows downward into the CPU's entry point.
 * Therefore staggering too many CPUs in one 32K block will corrupt CPU0's
 * entry code as the save states move downward.
 * input : smbase of first CPU (all other CPUs will go below this address)
 * input : num_cpus in the system. The map will be created from 0 to num_cpus.
 */
static int smm_create_map(uintptr_t smbase, unsigned int num_cpus,
                          const struct smm_loader_params *params)
{
        unsigned int i;
        struct rmodule smm_stub;
        unsigned int ss_size = params->per_cpu_save_state_size, stub_size;
        unsigned int smm_entry_offset = SMM_ENTRY_OFFSET;
        unsigned int seg_count = 0, segments = 0, available;
        unsigned int cpus_in_segment = 0;
        unsigned int base = smbase;

        if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
                printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
                return 0;
        }

        stub_size = rmodule_memory_size(&smm_stub);
        /* How many CPUs can fit into one 64K segment? */
        available = 0xFFFF - smm_entry_offset - ss_size - stub_size;
        if (available > 0) {
                cpus_in_segment = available / ss_size;
                /* minimum segments needed will always be 1 */
                segments = num_cpus / cpus_in_segment + 1;
                printk(BIOS_DEBUG,
                       "%s: cpus allowed in one segment %d\n", __func__, cpus_in_segment);
                printk(BIOS_DEBUG,
                       "%s: min # of segments needed %d\n", __func__, segments);
        } else {
                printk(BIOS_ERR, "%s: not enough space in SMM to setup all CPUs\n", __func__);
                printk(BIOS_ERR, " save state & stub size need to be reduced\n");
                printk(BIOS_ERR, " or increase SMRAM size\n");
                return 0;
        }

        if (ARRAY_SIZE(cpus) < num_cpus) {
                printk(BIOS_ERR,
                       "%s: increase MAX_CPUS in Kconfig\n", __func__);
                return 0;
        }

        for (i = 0; i < num_cpus; i++) {
                printk(BIOS_DEBUG, "CPU 0x%x\n", i);
                cpus[i].smbase = base;
                cpus[i].entry = base + smm_entry_offset;
                printk(BIOS_DEBUG, " smbase %lx entry %lx\n", cpus[i].smbase,
                       cpus[i].entry);
                cpus[i].ss_start = cpus[i].entry + (smm_entry_offset - ss_size);
                cpus[i].code_start = cpus[i].entry;
                cpus[i].code_end = cpus[i].entry + stub_size;
                printk(BIOS_DEBUG, " ss_start %lx code_end %lx\n", cpus[i].ss_start,
                       cpus[i].code_end);
                cpus[i].active = 1;
                base -= ss_size;
                seg_count++;
                if (seg_count >= cpus_in_segment) {
                        base -= smm_entry_offset;
                        seg_count = 0;
                        printk(BIOS_DEBUG, "-------------NEW CODE SEGMENT --------------\n");
                }
        }

        return 1;
}
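
/*
 * Worked example of the arithmetic above (hypothetical sizes, not taken
 * from any particular build): with ss_size = 0x400 and stub_size = 0x100,
 *   available       = 0xFFFF - 0x8000 - 0x400 - 0x100 = 0x7aff
 *   cpus_in_segment = 0x7aff / 0x400 = 30
 * so up to 30 CPUs share one 64K code segment. With smbase = 0x7f00000:
 *   cpus[0]: smbase 0x7f00000, entry 0x7f08000, ss_start 0x7f0fc00
 *   cpus[1]: smbase 0x7effc00, entry 0x7f07c00, ss_start 0x7f0f800
 * Each CPU is staggered down by ss_size; once cpus_in_segment is reached,
 * base drops by a further SMM_ENTRY_OFFSET to start a new code segment.
 */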

/*
 * This method expects the smm relocation map to be complete.
 * This method does not read any HW registers, it simply uses a
 * map that was created during SMM setup.
 * input: cpu_num - cpu number which is used as an index into the
 *        map to return the smbase
 */
u32 smm_get_cpu_smbase(unsigned int cpu_num)
{
        if (cpu_num < CONFIG_MAX_CPUS) {
                if (cpus[cpu_num].active)
                        return cpus[cpu_num].smbase;
        }
        return 0;
}
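
/*
 * Hypothetical caller sketch (the actual relocation flow lives outside
 * this file): a CPU performing relocation could look up its staggered
 * target with
 *   u32 smbase = smm_get_cpu_smbase(cpu_index());
 * and, if non-zero, write it into the SMBASE slot of its SMM save state.
 * The save state write itself is CPU/chipset specific and not shown here.
 */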

/*
 * This method assumes that at least 1 CPU has been set up from
 * which it will place other CPUs below its smbase ensuring that
 * save state does not clobber the first CPU's init code segment. The init
 * code, which is the smm stub code, is the same for all CPUs. They enter
 * smm, set up stacks (based on their apic id), enter protected mode
 * and then jump to the common smi handler. The stack is allocated
 * at the beginning of smram (aka tseg base, not smbase). The stack
 * pointer for each CPU is calculated by using its apic id
 * (code is in smm_stub.s).
 * Each entry point will now have the same stub code which sets up the CPU
 * stack, enters protected mode and then jumps to the smi handler. It is
 * important to enter protected mode before the jump because the "jump to
 * address" might be larger than the 20-bit address supported by real mode.
 * SMI entry right now is in real mode.
 * input: smbase - this is the smbase of the first cpu not the smbase
 *        where tseg starts (aka smram_start). All CPUs' code segments
 *        and stacks will be below this point except for the common
 *        SMI handler which is one segment above.
 * input: num_cpus - number of cpus that need relocation including
 *        the first CPU (though its code is already loaded)
 * input: top of stack (stacks work downward by default in Intel HW)
 * output: return 0 if the runtime smi code could not be installed. In
 *         this case SMM will not work and any SMIs generated will
 *         cause a CPU shutdown or general protection fault because
 *         the appropriate smi handling code was not installed.
 */

static int smm_place_entry_code(uintptr_t smbase, unsigned int num_cpus,
                                uintptr_t stack_top, const struct smm_loader_params *params)
{
        unsigned int i;
        unsigned int size;

        /*
         * Ensure there was enough space and the last CPU's smbase
         * did not encroach upon the stack. Stack top is smram start
         * + size of stack.
         */
        if (cpus[num_cpus].active) {
                if (cpus[num_cpus - 1].smbase + SMM_ENTRY_OFFSET < stack_top) {
                        printk(BIOS_ERR, "%s: stack encroachment\n", __func__);
                        printk(BIOS_ERR, "%s: smbase %lx, stack_top %lx\n",
                               __func__, cpus[num_cpus].smbase, stack_top);
                        return 0;
                }
        }

        printk(BIOS_INFO, "%s: smbase %lx, stack_top %lx\n",
               __func__, cpus[num_cpus - 1].smbase, stack_top);

        /* start at 1, the first CPU stub code is already there */
        size = cpus[0].code_end - cpus[0].code_start;
        for (i = 1; i < num_cpus; i++) {
                memcpy((int *)cpus[i].code_start, (int *)cpus[0].code_start, size);
                printk(BIOS_DEBUG,
                       "SMM Module: placing smm entry code at %lx, cpu # 0x%x\n",
                       cpus[i].code_start, i);
                printk(BIOS_DEBUG, "%s: copying from %lx to %lx 0x%x bytes\n",
                       __func__, cpus[0].code_start, cpus[i].code_start, size);
        }
        return 1;
}
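
/*
 * Illustration of the encroachment check above (hypothetical numbers):
 * with the last CPU's smbase at 0x7eff000, placement succeeds only if
 *   0x7eff000 + SMM_ENTRY_OFFSET (0x8000) >= stack_top
 * i.e. the lowest entry point must still sit at or above the top of the
 * stack region. The placement itself is a plain memcpy of CPU0's stub
 * image (code_end - code_start bytes) to every other CPU's code_start.
 */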

/* Stack top and per-CPU stack size for the handler, set by smm_setup_stack(). */
static uintptr_t stack_top;
static size_t g_stack_size;

int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
                    const unsigned int total_cpus, const size_t stack_size)
{
        /* Need a minimum stack size and alignment. */
        if (stack_size <= SMM_MINIMUM_STACK_SIZE || (stack_size & 3) != 0) {
                printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
                return -1;
        }

        const size_t total_stack_size = total_cpus * stack_size;
        if (total_stack_size >= perm_smram_size) {
                printk(BIOS_ERR, "%s: Stack won't fit smram\n", __func__);
                return -1;
        }
        stack_top = perm_smbase + total_stack_size;
        g_stack_size = stack_size;
        return 0;
}
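
/*
 * Worked example (hypothetical values): perm_smbase = 0x7f000000,
 * perm_smram_size = 0x800000, total_cpus = 4, stack_size = 0x1000:
 *   total_stack_size = 4 * 0x1000 = 0x4000 (< 0x800000, so it fits)
 *   stack_top        = 0x7f000000 + 0x4000 = 0x7f004000
 * stack_size must exceed SMM_MINIMUM_STACK_SIZE and be 4-byte aligned
 * ((stack_size & 3) == 0), otherwise the call fails with -1.
 */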

/*
 * Place the staggered entry points for each CPU. The entry points are
 * staggered by the per CPU SMM save state size extending down from
 * SMM_ENTRY_OFFSET.
 */
static int smm_stub_place_staggered_entry_points(char *base,
        const struct smm_loader_params *params, const struct rmodule *smm_stub)
{
        size_t stub_entry_offset;
        int rc = 1;
        stub_entry_offset = rmodule_entry_offset(smm_stub);
        /* Each CPU now has its own stub code, which enters protected mode,
         * sets up the stack, and then jumps to the common SMI handler.
         */
        if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
                rc = smm_place_entry_code((uintptr_t)base,
                                          params->num_concurrent_save_states,
                                          stack_top, params);
        }
        return rc;
}

/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000) for the default SMI handler (entry at
 * 0x30000), but no assumption should be made for the permanent SMI handler.
 * The placement of CPU entry points for the permanent handler is determined
 * by the number of CPUs in the system and the amount of SMRAM.
 * There are potentially 2 regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 *
 * The save state always lives at the top of each CPU's smbase (and the entry
 * point is at offset 0x8000). This allows only a certain number of CPUs with
 * staggered entry points until the save state area comes down far enough to
 * overwrite/corrupt the entry code (stub code). Therefore, an SMM map is
 * created to avoid this corruption, see smm_create_map() above.
 * This module setup code works for the default (0x30000) SMM handler setup and the
 * permanent SMM handler.
 * The CPU stack is decided at runtime in the stub and is treated as a contiguous
 * region. As this might not fit the default SMRAM region, the same region used
 * by the permanent handler can be used during relocation.
 */
static int smm_module_setup_stub(const uintptr_t smbase, const size_t smm_size,
                                 struct smm_loader_params *params,
                                 void *const fxsave_area)
{
        size_t total_save_state_size;
        size_t smm_stub_size;
        uintptr_t smm_stub_loc;
        size_t size;
        uintptr_t base;
        size_t i;
        struct smm_stub_params *stub_params;
        struct rmodule smm_stub;
        base = smbase;
        size = smm_size;

        /* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
        if (params->num_cpus > CONFIG_MAX_CPUS) {
                printk(BIOS_ERR, "%s: not enough stacks\n", __func__);
                return -1;
        }

        /* Fail if can't parse the smm stub rmodule. */
        if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
                printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
                return -1;
        }

        /* Adjust remaining size to account for save state. */
        total_save_state_size = params->per_cpu_save_state_size *
                                params->num_concurrent_save_states;
        if (total_save_state_size > size) {
                printk(BIOS_ERR,
                       "%s: more state save space needed:need -> %zx:available->%zx\n",
                       __func__, total_save_state_size, size);
                return -1;
        }

        size -= total_save_state_size;

        /* The save state size encroached over the first SMM entry point. */
        if (size <= SMM_ENTRY_OFFSET) {
                printk(BIOS_ERR, "%s: encroachment over SMM entry point\n", __func__);
                printk(BIOS_ERR, "%s: state save size: %zx : smm_entry_offset -> %zx\n",
                       __func__, size, (size_t)SMM_ENTRY_OFFSET);
                return -1;
        }

        smm_stub_size = rmodule_memory_size(&smm_stub);

        /* Put the stub at the main entry point */
        smm_stub_loc = base + SMM_ENTRY_OFFSET;

        /* Stub is too big to fit. */
        if (smm_stub_size > (size - SMM_ENTRY_OFFSET)) {
                printk(BIOS_ERR, "%s: stub is too big to fit\n", __func__);
                return -1;
        }

        if (stack_top == 0) {
                printk(BIOS_ERR, "%s: error assigning stacks\n", __func__);
                return -1;
        }
        /* Load the stub. */
        if (rmodule_load((void *)smm_stub_loc, &smm_stub)) {
                printk(BIOS_ERR, "%s: load module failed\n", __func__);
                return -1;
        }

        if (!smm_stub_place_staggered_entry_points((void *)base, params, &smm_stub)) {
                printk(BIOS_ERR, "%s: staggered entry points failed\n", __func__);
                return -1;
        }

        /* Setup the parameters for the stub code. */
        stub_params = rmodule_parameters(&smm_stub);
        stub_params->stack_top = stack_top;
        stub_params->stack_size = g_stack_size;
        stub_params->c_handler = (uintptr_t)params->handler;
        stub_params->fxsave_area = (uintptr_t)fxsave_area;
        stub_params->fxsave_area_size = FXSAVE_SIZE;

        printk(BIOS_DEBUG,
               "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
        printk(BIOS_DEBUG, "%s: per cpu stack_size = 0x%x\n",
               __func__, stub_params->stack_size);
        printk(BIOS_DEBUG, "%s: runtime.start32_offset = 0x%x\n", __func__,
               stub_params->start32_offset);
        printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n",
               __func__, smm_size);

        /* Initialize the APIC id to CPU number table to be 1:1 */
        for (i = 0; i < params->num_cpus; i++)
                stub_params->apic_id_to_cpu[i] = i;

        /* Allow the initiator to manipulate SMM stub parameters. */
        params->stub_params = stub_params;

        printk(BIOS_DEBUG, "SMM Module: stub loaded at %lx. Will call %p\n",
               smm_stub_loc, params->handler);
        return 0;
}
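
/*
 * Sketch of the contract with the stub (the stub itself lives in
 * smm_stub.S and is not shown here): each CPU maps its APIC id to a CPU
 * number through apic_id_to_cpu[], derives its private stack roughly as
 *   sp = stack_top - cpu_number * stack_size
 * enters protected mode and calls c_handler. This paraphrases the stub's
 * behavior based on the parameters wired up above; it is not its literal code.
 */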

/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory, i.e. another SMM module isn't chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
        uintptr_t smram = SMM_DEFAULT_BASE;
        printk(BIOS_SPEW, "%s: enter\n", __func__);
        /* There can't be more than 1 concurrent save state for the relocation
         * handler because all CPUs default to 0x30000 as SMBASE. */
        if (params->num_concurrent_save_states > 1)
                return -1;

        /* A handler has to be defined to call for relocation. */
        if (params->handler == NULL)
                return -1;

        /* Since the relocation handler always uses stack, adjust the number
         * of concurrent stack users to be CONFIG_MAX_CPUS. */
        if (params->num_cpus == 0)
                params->num_cpus = CONFIG_MAX_CPUS;

        printk(BIOS_SPEW, "%s: exit\n", __func__);
        return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
                                     params, fxsave_area_relocation);
}
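
/*
 * Hypothetical caller sketch (the callback name is illustrative; the real
 * caller lives in the CPU init code, not in this file):
 *   struct smm_loader_params sp = {
 *           .num_concurrent_save_states = 1,
 *           .handler = smm_do_relocation, // relocation callback, caller-provided
 *   };
 *   if (smm_setup_relocation_handler(&sp))
 *           die("SMM relocation handler setup failed\n");
 * num_cpus may be left 0; it is widened to CONFIG_MAX_CPUS above.
 */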

static int smm_load_module_aseg(const uintptr_t smram_base, const size_t smram_size,
                                struct smm_loader_params *params);

/*
 * The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * |  BIOS resource  |
 * |   list (STM)    |
 * +-----------------+
 * |  fxsave area    |
 * +-----------------+
 * |   smi handler   |
 * |      ...        |
 * +-----------------+ <- cpu0
 * |    stub code    | <- cpu1
 * |    stub code    | <- cpu2
 * |    stub code    | <- cpu3, etc
 * |                 |
 * |                 |
 * |                 |
 * |     stacks      |
 * +-----------------+ <- smram start
 *
 * It should be noted that this algorithm will not work for
 * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
 * expects a region large enough to encompass the handler and stacks
 * as well as the SMM_DEFAULT_SIZE.
 */
int smm_load_module(const uintptr_t smram_base, const size_t smram_size,
                    struct smm_loader_params *params)
{
        struct rmodule smm_mod;
        struct smm_runtime *handler_mod_params;
        size_t total_stack_size;
        size_t handler_size;
        size_t module_alignment;
        size_t alignment_size;
        size_t fxsave_size;
        void *fxsave_area;
        size_t total_size = 0;
        uintptr_t base; /* The base for the permanent handler */
        const struct cbmem_entry *cbmemc;

        if (CONFIG(SMM_ASEG))
                return smm_load_module_aseg(smram_base, smram_size, params);

        if (smram_size <= SMM_DEFAULT_SIZE)
                return -1;

        /* Load the main SMI handler at the top of SMRAM;
         * everything else will go below.
         */
        base = smram_base;
        base += smram_size;

        /* Fail if can't parse the smm rmodule. */
        if (rmodule_parse(&_binary_smm_start, &smm_mod))
                return -1;

        /* Clear SMM region */
        if (CONFIG(DEBUG_SMI))
                memset((void *)smram_base, 0xcd, smram_size);

        total_stack_size = stack_top - smram_base;
        total_size += total_stack_size;
        /* Stacks are the base of SMRAM */

        /* MSEG starts at the top of SMRAM and works down */
        if (CONFIG(STM)) {
                base -= CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
                total_size += CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
        }

        /* FXSAVE goes below MSEG */
        if (CONFIG(SSE)) {
                fxsave_size = FXSAVE_SIZE * params->num_cpus;
                fxsave_area = (char *)base - fxsave_size;
                base -= fxsave_size;
                total_size += fxsave_size;
        } else {
                fxsave_size = 0;
                fxsave_area = NULL;
        }

        handler_size = rmodule_memory_size(&smm_mod);
        base -= handler_size;
        total_size += handler_size;
        module_alignment = rmodule_load_alignment(&smm_mod);
        alignment_size = module_alignment - (base % module_alignment);
        if (alignment_size != module_alignment) {
                handler_size += alignment_size;
                base += alignment_size;
        }

        printk(BIOS_DEBUG,
               "%s: total_smm_space_needed %zx, available -> %zx\n",
               __func__, total_size, smram_size);

        /* Does the required amount of memory exceed the SMRAM region size? */
        if (total_size > smram_size) {
                printk(BIOS_ERR, "%s: need more SMRAM\n", __func__);
                return -1;
        }
        if (handler_size > SMM_CODE_SEGMENT_SIZE) {
                printk(BIOS_ERR, "%s: increase SMM_CODE_SEGMENT_SIZE: handler_size = %zx\n",
                       __func__, handler_size);
                return -1;
        }

        if (rmodule_load((void *)base, &smm_mod))
                return -1;

        params->handler = rmodule_entry(&smm_mod);
        handler_mod_params = rmodule_parameters(&smm_mod);
        handler_mod_params->smbase = smram_base;
        handler_mod_params->smm_size = smram_size;
        handler_mod_params->save_state_size = params->real_cpu_save_state_size;
        handler_mod_params->num_cpus = params->num_cpus;
        handler_mod_params->gnvs_ptr = (uintptr_t)acpi_get_gnvs();

        if (CONFIG(CONSOLE_CBMEM) && (cbmemc = cbmem_entry_find(CBMEM_ID_CONSOLE))) {
                handler_mod_params->cbmemc = cbmem_entry_start(cbmemc);
                handler_mod_params->cbmemc_size = cbmem_entry_size(cbmemc);
        } else {
                handler_mod_params->cbmemc = 0;
                handler_mod_params->cbmemc_size = 0;
        }

        printk(BIOS_DEBUG, "%s: smram_start: 0x%lx\n", __func__, smram_base);
        printk(BIOS_DEBUG, "%s: smram_end: %lx\n", __func__, smram_base + smram_size);
        printk(BIOS_DEBUG, "%s: handler start %p\n",
               __func__, params->handler);
        printk(BIOS_DEBUG, "%s: handler_size %zx\n",
               __func__, handler_size);
        printk(BIOS_DEBUG, "%s: fxsave_area %p\n",
               __func__, fxsave_area);
        printk(BIOS_DEBUG, "%s: fxsave_size %zx\n",
               __func__, fxsave_size);
        printk(BIOS_DEBUG, "%s: CONFIG_MSEG_SIZE 0x%x\n",
               __func__, CONFIG_MSEG_SIZE);
        printk(BIOS_DEBUG, "%s: CONFIG_BIOS_RESOURCE_LIST_SIZE 0x%x\n",
               __func__, CONFIG_BIOS_RESOURCE_LIST_SIZE);

        printk(BIOS_DEBUG, "%s: handler_mod_params.smbase = 0x%x\n", __func__,
               handler_mod_params->smbase);
        printk(BIOS_DEBUG, "%s: per_cpu_save_state_size = 0x%x\n", __func__,
               handler_mod_params->save_state_size);
        printk(BIOS_DEBUG, "%s: num_cpus = 0x%x\n", __func__, handler_mod_params->num_cpus);
        printk(BIOS_DEBUG, "%s: cbmemc = %p, cbmemc_size = %#x\n", __func__,
               handler_mod_params->cbmemc, handler_mod_params->cbmemc_size);
        printk(BIOS_DEBUG, "%s: total_save_state_size = 0x%x\n", __func__,
               (handler_mod_params->save_state_size * handler_mod_params->num_cpus));

        /* CPU 0 smbase goes first, all other CPUs
         * will be staggered below.
         */
        base -= SMM_CODE_SEGMENT_SIZE;
        printk(BIOS_DEBUG, "%s: cpu0 entry: %lx\n", __func__, base);

        if (!smm_create_map(base, params->num_concurrent_save_states, params)) {
                printk(BIOS_ERR, "%s: Error creating CPU map\n", __func__);
                return -1;
        }

        for (int i = 0; i < params->num_cpus; i++) {
                handler_mod_params->save_state_top[i] =
                        cpus[i].ss_start + params->per_cpu_save_state_size;
        }

        return smm_module_setup_stub(base, smram_size, params, fxsave_area);
}
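
/*
 * Worked placement example (hypothetical 8 MiB TSEG at 0x7f800000,
 * 4 CPUs, SSE enabled, STM disabled):
 *   base  = 0x7f800000 + 0x800000           = 0x80000000 (top of SMRAM)
 *   base -= 4 * FXSAVE_SIZE (0x800)         -> 0x7ffff800
 *   base -= handler_size (say 0x20000)      -> 0x7ffdf800 (plus alignment)
 *   base -= SMM_CODE_SEGMENT_SIZE (0x10000) -> cpu0 code segment
 * smm_create_map() then staggers the remaining CPUs below cpu0, and the
 * stacks occupy [smram_base, stack_top) at the bottom of SMRAM.
 */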

/*
 * The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size == 0x10000
 * |   save states   |
 * +-----------------+
 * |  fxsave area    |
 * +-----------------+
 * |   smi handler   |
 * |      ...        |
 * +-----------------+ <- cpu0
 * |    stub code    | <- cpu1
 * |    stub code    | <- cpu2
 * |    stub code    | <- cpu3, etc
 * |                 |
 * |                 |
 * |                 |
 * |     stacks      |
 * +-----------------+ <- smram start = 0xA0000
 */
static int smm_load_module_aseg(const uintptr_t smram_base, const size_t smram_size,
                                struct smm_loader_params *params)
{
        struct rmodule smm_mod;
        struct smm_runtime *handler_mod_params;

        if (smram_size != SMM_DEFAULT_SIZE)
                return -1;

        if (smram_base != SMM_BASE)
                return -1;

        /* Fail if can't parse the smm rmodule. */
        if (rmodule_parse(&_binary_smm_start, &smm_mod))
                return -1;

        if (!smm_create_map(smram_base, params->num_concurrent_save_states, params)) {
                printk(BIOS_ERR, "%s: Error creating CPU map\n", __func__);
                return -1;
        }

        const uintptr_t entry0_end = cpus[0].code_end;
        const uintptr_t save_state_base = cpus[params->num_cpus - 1].ss_start;
        const size_t fxsave_size = FXSAVE_SIZE * params->num_cpus;
        const uintptr_t fxsave_base = ALIGN_DOWN(save_state_base - fxsave_size, 16);

        if (fxsave_base <= entry0_end) {
                printk(BIOS_ERR, "%s, fxsave %lx won't fit smram\n", __func__, fxsave_base);
                return -1;
        }

        const size_t handler_size = rmodule_memory_size(&smm_mod);
        const size_t module_alignment = rmodule_load_alignment(&smm_mod);
        const uintptr_t module_base = ALIGN_DOWN(fxsave_base - handler_size, module_alignment);

        if (module_base <= entry0_end) {
                printk(BIOS_ERR, "%s, module won't fit smram\n", __func__);
                return -1;
        }

        if (rmodule_load((void *)module_base, &smm_mod))
                return -1;

        params->handler = rmodule_entry(&smm_mod);
        handler_mod_params = rmodule_parameters(&smm_mod);
        handler_mod_params->smbase = smram_base;
        handler_mod_params->smm_size = smram_size;
        handler_mod_params->save_state_size = params->real_cpu_save_state_size;
        handler_mod_params->num_cpus = params->num_cpus;
        handler_mod_params->gnvs_ptr = (uintptr_t)acpi_get_gnvs();

        for (int i = 0; i < params->num_cpus; i++) {
                handler_mod_params->save_state_top[i] =
                        cpus[i].ss_start + params->per_cpu_save_state_size;
        }

        printk(BIOS_DEBUG, "%s: smram_start: 0x%lx\n", __func__, smram_base);
        printk(BIOS_DEBUG, "%s: smram_end: %lx\n", __func__, smram_base + smram_size);
        printk(BIOS_DEBUG, "%s: handler start %p\n", __func__, params->handler);
        printk(BIOS_DEBUG, "%s: handler_size %zx\n", __func__, handler_size);
        printk(BIOS_DEBUG, "%s: fxsave_area %lx\n", __func__, fxsave_base);
        printk(BIOS_DEBUG, "%s: fxsave_size %zx\n", __func__, fxsave_size);

        printk(BIOS_DEBUG, "%s: handler_mod_params.smbase = 0x%x\n", __func__,
               handler_mod_params->smbase);
        printk(BIOS_DEBUG, "%s: per_cpu_save_state_size = 0x%x\n", __func__,
               handler_mod_params->save_state_size);
        printk(BIOS_DEBUG, "%s: num_cpus = 0x%x\n", __func__, handler_mod_params->num_cpus);
        printk(BIOS_DEBUG, "%s: total_save_state_size = 0x%x\n", __func__,
               (handler_mod_params->save_state_size * handler_mod_params->num_cpus));

        return smm_module_setup_stub(smram_base, smram_size, params, (void *)fxsave_base);
}
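
/*
 * For ASEG the numbers are fixed by the segment itself rather than by
 * configuration: smram_base = SMM_BASE = 0xa0000 and smram_size =
 * SMM_DEFAULT_SIZE = 0x10000, so cpu0's entry point lands at
 * 0xa0000 + SMM_ENTRY_OFFSET = 0xa8000 and the save states end at
 * 0xb0000. The fxsave area and the handler are then packed downward from
 * the lowest save state, each placement checked against cpus[0].code_end
 * so that nothing overlaps the stub code.
 */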