coreboot
coreboot is an Open Source project aimed at replacing the proprietary BIOS found in most computers.
mp_init.c
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <console/console.h>
4 #include <string.h>
5 #include <rmodule.h>
6 #include <commonlib/helpers.h>
7 #include <cpu/cpu.h>
8 #include <cpu/intel/microcode.h>
9 #include <cpu/x86/cache.h>
10 #include <cpu/x86/gdt.h>
11 #include <cpu/x86/lapic.h>
12 #include <cpu/x86/name.h>
13 #include <cpu/x86/msr.h>
14 #include <cpu/x86/mtrr.h>
15 #include <cpu/x86/smm.h>
16 #include <cpu/x86/mp.h>
17 #include <delay.h>
18 #include <device/device.h>
19 #include <device/path.h>
20 #include <smp/atomic.h>
21 #include <smp/spinlock.h>
22 #include <symbols.h>
23 #include <timer.h>
24 #include <thread.h>
25 #include <types.h>
26 
27 #include <security/intel/stm/SmmStm.h>
28 
29 #define MAX_APIC_IDS 256
30 
31 struct mp_callback {
32  void (*func)(void *);
33  void *arg;
34  int logical_cpu_number;
35 };
36 
37 static char processor_name[49];
38 
39 /*
40  * A mp_flight_record details a sequence of calls for the APs to perform
41  * along with the BSP to coordinate sequencing. Each flight record either
42  * provides a barrier for each AP before calling the callback or the APs
43  * are allowed to perform the callback without waiting. Regardless, each
44  * record has its cpus_entered field incremented by each AP. When
45  * the BSP observes that cpus_entered matches the number of APs,
46  * the bsp_call callback is invoked and, upon returning, releases the
47  * barrier allowing the APs to make further progress.
48  *
49  * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
50  * callback will just not be called.
51  */
52 struct mp_flight_record {
53  atomic_t barrier;
54  atomic_t cpus_entered;
55  void (*ap_call)(void);
56  void (*bsp_call)(void);
57 } __aligned(CACHELINE_SIZE);
58 
59 #define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
60  { \
61  .barrier = ATOMIC_INIT(barrier_), \
62  .cpus_entered = ATOMIC_INIT(0), \
63  .ap_call = ap_func_, \
64  .bsp_call = bsp_func_, \
65  }
66 
67 #define MP_FR_BLOCK_APS(ap_func_, bsp_func_) \
68  _MP_FLIGHT_RECORD(0, ap_func_, bsp_func_)
69 
70 #define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
71  _MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)
72 
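As a usage illustration (hypothetical; the flight plan this file actually installs is mp_steps[] further below), a two-record plan built from these macros could look like the following, where relocate_ap(), relocate_bsp() and init_ap() are assumed void(void) callbacks:

static struct mp_flight_record example_plan[] = {
        /* APs check in and block; the BSP runs relocate_bsp() once all APs
         * have entered, then releases them to run relocate_ap(). */
        MP_FR_BLOCK_APS(relocate_ap, relocate_bsp),
        /* APs run init_ap() immediately without waiting for the BSP. */
        MP_FR_NOBLOCK_APS(init_ap, NULL),
};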
73 /* The mp_params structure provides the arguments to the mp subsystem
74  * for bringing up APs. */
75 struct mp_params {
76  int num_cpus; /* Total cpus include BSP */
77  int parallel_microcode_load;
78  const void *microcode_pointer;
79  /* Flight plan for APs and BSP. */
80  struct mp_flight_record *flight_plan;
81  int num_records;
82 };
83 
84 /* This needs to match the layout in the .module_parameters section. */
85 struct sipi_params {
86  uint16_t gdtlimit;
87  uint32_t gdt;
88  uint16_t unused;
89  uint32_t idt_ptr;
90  uint32_t per_cpu_segment_descriptors;
91  uint32_t per_cpu_segment_selector;
92  uint32_t stack_top;
93  uint32_t stack_size;
94  uint32_t microcode_lock; /* 0xffffffff means parallel loading. */
95  uint32_t microcode_ptr;
96  uint32_t msr_table_ptr;
97  uint32_t msr_count;
98  uint32_t c_handler;
99  atomic_t ap_count;
100 } __packed;
101 
102 /* This also needs to match the assembly code for saved MSR encoding. */
103 struct saved_msr {
104  uint32_t index;
105  uint32_t lo;
106  uint32_t hi;
107 } __packed;
108 
109 /* The sipi vector rmodule is included in the ramstage using 'objdump -B'. */
110 extern char _binary_sipi_vector_start[];
111 
112 /* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is that the
113  * memory range is already reserved so the OS cannot use it. That region is
114  * free to use for AP bringup before SMM is initialized. */
115 static const uintptr_t sipi_vector_location = SMM_DEFAULT_BASE;
116 static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;
117 
118 struct mp_flight_plan {
119  int num_records;
120  struct mp_flight_record *records;
121 };
122 
123 static int global_num_aps;
124 static struct mp_flight_plan mp_info;
125 
126 /* Keep track of device structure for each CPU. */
127 static struct device *cpus_dev[CONFIG_MAX_CPUS];
128 
129 static inline void barrier_wait(atomic_t *b)
130 {
131  while (atomic_read(b) == 0)
132  asm ("pause");
133  mfence();
134 }
135 
136 static inline void release_barrier(atomic_t *b)
137 {
138  mfence();
139  atomic_set(b, 1);
140 }
141 
142 static enum cb_err wait_for_aps(atomic_t *val, int target, int total_delay,
143  int delay_step)
144 {
145  int delayed = 0;
146  while (atomic_read(val) != target) {
147  udelay(delay_step);
148  delayed += delay_step;
149  if (delayed >= total_delay) {
150  /* Not all APs ready before timeout */
151  return CB_ERR;
152  }
153  }
154 
155  /* APs ready before timeout */
156  return CB_SUCCESS;
157 }
158 
159 static void ap_do_flight_plan(void)
160 {
161  int i;
162 
163  for (i = 0; i < mp_info.num_records; i++) {
164  struct mp_flight_record *rec = &mp_info.records[i];
165 
166  atomic_inc(&rec->cpus_entered);
167  barrier_wait(&rec->barrier);
168 
169  if (rec->ap_call != NULL)
170  rec->ap_call();
171  }
172 }
173 
174 static void park_this_cpu(void *unused)
175 {
176  stop_this_cpu();
177 }
178 
179 /* By the time APs call ap_init() caching has been setup, and microcode has
180  * been loaded. */
181 static void asmlinkage ap_init(void)
182 {
183  struct cpu_info *info = cpu_info();
184 
185  /* Ensure the local APIC is enabled */
186  enable_lapic();
187  setup_lapic_interrupts();
188 
189  info->cpu = cpus_dev[info->index];
190 
191  cpu_add_map_entry(info->index);
192 
193  /* Fix up APIC id with reality. */
194  info->cpu->path.apic.apic_id = lapicid();
195 
196  if (cpu_is_intel())
197  printk(BIOS_INFO, "AP: slot %zu apic_id %x, MCU rev: 0x%08x\n", info->index,
198  info->cpu->path.apic.apic_id, get_current_microcode_rev());
199  else
200  printk(BIOS_INFO, "AP: slot %zu apic_id %x\n", info->index,
201  info->cpu->path.apic.apic_id);
202 
203  /* Walk the flight plan */
204  ap_do_flight_plan();
205 
206  /* Park the AP. */
207  park_this_cpu(NULL);
208 }
209 
210 static void setup_default_sipi_vector_params(struct sipi_params *sp)
211 {
212  sp->gdt = (uintptr_t)&gdt;
213  sp->gdtlimit = (uintptr_t)&gdt_end - (uintptr_t)&gdt - 1;
214  sp->idt_ptr = (uintptr_t)&idtarg;
215  sp->per_cpu_segment_descriptors = (uintptr_t)&per_cpu_segment_descriptors;
216  sp->per_cpu_segment_selector = per_cpu_segment_selector;
217  sp->stack_size = CONFIG_STACK_SIZE;
218  sp->stack_top = ALIGN_DOWN((uintptr_t)&_estack, CONFIG_STACK_SIZE);
219 }
220 
221 #define NUM_FIXED_MTRRS 11
222 static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
223  MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
224  MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
225  MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
226  MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
227 };
228 
229 static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
230 {
231  msr_t msr;
232 
233  msr = rdmsr(index);
234  entry->index = index;
235  entry->lo = msr.lo;
236  entry->hi = msr.hi;
237 
238  /* Return the next entry. */
239  entry++;
240  return entry;
241 }
242 
243 static int save_bsp_msrs(char *start, int size)
244 {
245  int msr_count;
246  int num_var_mtrrs;
247  struct saved_msr *msr_entry;
248  int i;
249 
250  /* Determine the number of MTRRs that need to be saved. */
251  num_var_mtrrs = get_var_mtrr_count();
252 
253  /* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
254  msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;
255 
256  if ((msr_count * sizeof(struct saved_msr)) > size) {
257  printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
258  return -1;
259  }
260 
261  fixed_mtrrs_expose_amd_rwdram();
262 
263  msr_entry = (void *)start;
264  for (i = 0; i < NUM_FIXED_MTRRS; i++)
265  msr_entry = save_msr(fixed_mtrrs[i], msr_entry);
266 
267  for (i = 0; i < num_var_mtrrs; i++) {
268  msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
269  msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
270  }
271 
272  msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);
273 
274  fixed_mtrrs_hide_amd_rwdram();
275 
276  /* Tell static analysis we know value is left unused. */
277  (void)msr_entry;
278 
279  return msr_count;
280 }
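As a concrete sizing example (assuming a part that reports 10 variable MTRRs, which is typical but not guaranteed): msr_count = 2 * 10 + NUM_FIXED_MTRRS + 1 = 32 entries, and since struct saved_msr packs to three 32-bit words (12 bytes), the mirror occupies 32 * 12 = 384 bytes of the space left after the SIPI module.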
281 
282 static atomic_t *load_sipi_vector(struct mp_params *mp_params)
283 {
284  struct rmodule sipi_mod;
285  int module_size;
286  int num_msrs;
287  struct sipi_params *sp;
288  char *mod_loc = (void *)sipi_vector_location;
289  const int loc_size = sipi_vector_location_size;
290  atomic_t *ap_count = NULL;
291 
292  if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
293  printk(BIOS_CRIT, "Unable to parse sipi module.\n");
294  return ap_count;
295  }
296 
297  if (rmodule_entry_offset(&sipi_mod) != 0) {
298  printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
299  return ap_count;
300  }
301 
302  if (rmodule_load_alignment(&sipi_mod) != 4096) {
303  printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
304  rmodule_load_alignment(&sipi_mod));
305  return ap_count;
306  }
307 
308  module_size = rmodule_memory_size(&sipi_mod);
309 
310  /* Align to 4 bytes. */
311  module_size = ALIGN_UP(module_size, 4);
312 
313  if (module_size > loc_size) {
314  printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
315  module_size, loc_size);
316  return ap_count;
317  }
318 
319  num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);
320 
321  if (num_msrs < 0) {
322  printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
323  return ap_count;
324  }
325 
326  if (rmodule_load(mod_loc, &sipi_mod)) {
327  printk(BIOS_CRIT, "Unable to load SIPI module.\n");
328  return ap_count;
329  }
330 
331  sp = rmodule_parameters(&sipi_mod);
332 
333  if (sp == NULL) {
334  printk(BIOS_CRIT, "SIPI module has no parameters.\n");
335  return ap_count;
336  }
337 
338  setup_default_sipi_vector_params(sp);
339  /* Setup MSR table. */
340  sp->msr_table_ptr = (uintptr_t)&mod_loc[module_size];
341  sp->msr_count = num_msrs;
342  /* Provide pointer to microcode patch. */
343  sp->microcode_ptr = (uintptr_t)mp_params->microcode_pointer;
344  /* Pass on ability to load microcode in parallel. */
345  if (mp_params->parallel_microcode_load)
346  sp->microcode_lock = ~0;
347  else
348  sp->microcode_lock = 0;
349  sp->c_handler = (uintptr_t)&ap_init;
350  ap_count = &sp->ap_count;
351  atomic_set(ap_count, 0);
352 
353  return ap_count;
354 }
355 
356 static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
357 {
358  int i;
359  int max_cpus;
360  struct cpu_info *info;
361 
362  max_cpus = p->num_cpus;
363  if (max_cpus > CONFIG_MAX_CPUS) {
364  printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
365  max_cpus, CONFIG_MAX_CPUS);
366  max_cpus = CONFIG_MAX_CPUS;
367  }
368 
369  info = cpu_info();
370  for (i = 1; i < max_cpus; i++) {
371  struct device_path cpu_path;
372  struct device *new;
373 
374  /* Build the CPU device path */
375  cpu_path.type = DEVICE_PATH_APIC;
376 
377  /* Assuming linear APIC space allocation. AP will set its own
378  APIC id in the ap_init() path above. */
379  cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;
380 
381  /* Allocate the new CPU device structure */
382  new = alloc_find_dev(cpu_bus, &cpu_path);
383  if (new == NULL) {
384  printk(BIOS_CRIT, "Could not allocate CPU device\n");
385  max_cpus--;
386  continue;
387  }
388  new->name = processor_name;
389  cpus_dev[i] = new;
390  }
391 
392  return max_cpus;
393 }
394 
395 static enum cb_err apic_wait_timeout(int total_delay, int delay_step)
396 {
397  int total = 0;
398 
399  while (lapic_busy()) {
400  udelay(delay_step);
401  total += delay_step;
402  if (total >= total_delay) {
403  /* LAPIC not ready before the timeout */
404  return CB_ERR;
405  }
406  }
407 
408  /* LAPIC ready before the timeout */
409  return CB_SUCCESS;
410 }
411 
412 /* Send Startup IPI to APs */
413 static enum cb_err send_sipi_to_aps(int ap_count, atomic_t *num_aps, int sipi_vector)
414 {
415  if (lapic_busy()) {
416  printk(BIOS_DEBUG, "Waiting for ICR not to be busy...\n");
417  if (apic_wait_timeout(1000 /* 1 ms */, 50) != CB_SUCCESS) {
418  printk(BIOS_ERR, "timed out. Aborting.\n");
419  return CB_ERR;
420  }
421  printk(BIOS_DEBUG, "done.\n");
422  }
423 
424  lapic_send_ipi_others(LAPIC_INT_ASSERT | LAPIC_DM_STARTUP | sipi_vector);
425  printk(BIOS_DEBUG, "Waiting for SIPI to complete...\n");
426  if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */) != CB_SUCCESS) {
427  printk(BIOS_ERR, "timed out.\n");
428  return CB_ERR;
429  }
430  printk(BIOS_DEBUG, "done.\n");
431  return CB_SUCCESS;
432 }
433 
434 static enum cb_err start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
435 {
436  int sipi_vector;
437  /* Max location is 4KiB below 1MiB */
438  const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;
439 
440  if (ap_count == 0)
441  return CB_SUCCESS;
442 
443  /* The vector is sent as a 4k aligned address in one byte. */
444  sipi_vector = sipi_vector_location >> 12;
445 
446  if (sipi_vector > max_vector_loc) {
447  printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
448  sipi_vector);
449  return CB_ERR;
450  }
451 
452  printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);
453 
454  if (lapic_busy()) {
455  printk(BIOS_DEBUG, "Waiting for ICR not to be busy...\n");
456  if (apic_wait_timeout(1000 /* 1 ms */, 50) != CB_SUCCESS) {
457  printk(BIOS_ERR, "timed out. Aborting.\n");
458  return CB_ERR;
459  }
460  printk(BIOS_DEBUG, "done.\n");
461  }
462 
463  /* Send INIT IPI to all but self. */
464  lapic_send_ipi_others(LAPIC_INT_ASSERT | LAPIC_DM_INIT);
465 
466  if (!CONFIG(X86_INIT_NEED_1_SIPI)) {
467  printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
468  mdelay(10);
469 
470  /* Send 1st Startup IPI (SIPI) */
471  if (send_sipi_to_aps(ap_count, num_aps, sipi_vector) != CB_SUCCESS)
472  return CB_ERR;
473 
474  /* Wait for CPUs to check in up to 200 us. */
475  wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */);
476  }
477 
478  /* Send final SIPI */
479  if (send_sipi_to_aps(ap_count, num_aps, sipi_vector) != CB_SUCCESS)
480  return CB_ERR;
481 
482  /* Wait for CPUs to check in. */
483  if (wait_for_aps(num_aps, ap_count, 100000 /* 100 ms */, 50 /* us */) != CB_SUCCESS) {
484  printk(BIOS_ERR, "Not all APs checked in: %d/%d.\n",
485  atomic_read(num_aps), ap_count);
486  return CB_ERR;
487  }
488 
489  return CB_SUCCESS;
490 }
491 
492 static enum cb_err bsp_do_flight_plan(struct mp_params *mp_params)
493 {
494  int i;
495  enum cb_err ret = CB_SUCCESS;
496  /*
497  * Set time out for flight plan to a huge minimum value (>=1 second).
498  * CPUs with many APs may take longer if there is contention for
499  * resources such as UART, so scale the time out up by increments of
500  * 100ms if needed.
501  */
502  const int timeout_us = MAX(1000000, 100000 * mp_params->num_cpus);
503  const int step_us = 100;
504  int num_aps = mp_params->num_cpus - 1;
505  struct stopwatch sw;
506 
507  stopwatch_init(&sw);
508 
509  for (i = 0; i < mp_params->num_records; i++) {
510  struct mp_flight_record *rec = &mp_params->flight_plan[i];
511 
512  /* Wait for APs if the record is not released. */
513  if (atomic_read(&rec->barrier) == 0) {
514  /* Wait for the APs to check in. */
515  if (wait_for_aps(&rec->cpus_entered, num_aps,
516  timeout_us, step_us) != CB_SUCCESS) {
517  printk(BIOS_ERR, "MP record %d timeout.\n", i);
518  ret = CB_ERR;
519  }
520  }
521 
522  if (rec->bsp_call != NULL)
523  rec->bsp_call();
524 
525  release_barrier(&rec->barrier);
526  }
527 
528  printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
529  stopwatch_duration_msecs(&sw));
530  return ret;
531 }
532 
533 static void init_bsp(struct bus *cpu_bus)
534 {
535  struct device_path cpu_path;
536  struct cpu_info *info;
537 
538  /* Print processor name */
539  fill_processor_name(processor_name);
540  printk(BIOS_INFO, "CPU: %s.\n", processor_name);
541 
542  /* Ensure the local APIC is enabled */
543  enable_lapic();
544  setup_lapic_interrupts();
545 
546  /* Set the device path of the boot CPU. */
547  cpu_path.type = DEVICE_PATH_APIC;
548  cpu_path.apic.apic_id = lapicid();
549 
550  /* Find the device structure for the boot CPU. */
551  info = cpu_info();
552  info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
553  info->cpu->name = processor_name;
554 
555  if (info->index != 0)
556  printk(BIOS_CRIT, "BSP index(%zd) != 0!\n", info->index);
557 
558  /* Track BSP in cpu_map structures. */
559  cpu_add_map_entry(info->index);
560 }
561 
562 /*
563  * mp_init() will set up the SIPI vector and bring up the APs according to
564  * mp_params. Each flight record will be executed according to the plan. Note
565  * that the MP infrastructure uses the SMM default area without saving it. It's
566  * up to the chipset or mainboard to either e820-reserve this area or save this
567  * region prior to calling mp_init() and restore it after mp_init() returns.
568  *
569  * At the time mp_init() is called the MTRR MSRs are mirrored into APs then
570  * caching is enabled before running the flight plan.
571  *
572  * The MP initialization has the following properties:
573  * 1. APs are brought up in parallel.
574  * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
575  * Therefore, one cannot rely on this property or the order of devices in
576  * the device tree unless the chipset or mainboard know the APIC ids
577  * a priori.
578  */
579 static enum cb_err mp_init(struct bus *cpu_bus, struct mp_params *p)
580 {
581  int num_cpus;
582  atomic_t *ap_count;
583 
584  init_bsp(cpu_bus);
585 
586  if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
587  printk(BIOS_CRIT, "Invalid MP parameters\n");
588  return CB_ERR;
589  }
590 
591  /* We just need to run things on the BSP */
592  if (!CONFIG(SMP))
593  return bsp_do_flight_plan(p);
594 
595  /* Default to currently running CPU. */
596  num_cpus = allocate_cpu_devices(cpu_bus, p);
597 
598  if (num_cpus < p->num_cpus) {
599  printk(BIOS_CRIT,
600  "ERROR: More cpus requested (%d) than supported (%d).\n",
601  p->num_cpus, num_cpus);
602  return CB_ERR;
603  }
604 
605  /* Copy needed parameters so that APs have a reference to the plan. */
606  mp_info.num_records = p->num_records;
607  mp_info.records = p->flight_plan;
608 
609  /* Load the SIPI vector. */
610  ap_count = load_sipi_vector(p);
611  if (ap_count == NULL)
612  return CB_ERR;
613 
614  /* Make sure SIPI data hits RAM so the APs that come up will see
615  * the startup code even if the caches are disabled. */
616  wbinvd();
617 
618  /* Start the APs providing number of APs and the cpus_entered field. */
619  global_num_aps = p->num_cpus - 1;
620  if (start_aps(cpu_bus, global_num_aps, ap_count) != CB_SUCCESS) {
621  mdelay(1000);
622  printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
623  atomic_read(ap_count), global_num_aps);
624  return CB_ERR;
625  }
626 
627  /* Walk the flight plan for the BSP. */
628  return bsp_do_flight_plan(p);
629 }
630 
631 /* Calls cpu_initialize(info->index) which calls the coreboot CPU drivers. */
632 static void mp_initialize_cpu(void)
633 {
634  /* Call back into driver infrastructure for the AP initialization. */
635  struct cpu_info *info = cpu_info();
636  cpu_initialize(info->index);
637 }
638 
639 void smm_initiate_relocation_parallel(void)
640 {
641  if (lapic_busy()) {
642  printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
643  if (apic_wait_timeout(1000 /* 1 ms */, 50) != CB_SUCCESS) {
644  printk(BIOS_DEBUG, "timed out. Aborting.\n");
645  return;
646  }
647  printk(BIOS_DEBUG, "done.\n");
648  }
649 
650  lapic_send_ipi_self(LAPIC_INT_ASSERT | LAPIC_DM_SMI);
651 
652  if (lapic_busy()) {
653  if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */) != CB_SUCCESS) {
654  printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
655  return;
656  }
657  }
658  printk(BIOS_DEBUG, "Relocation complete.\n");
659 }
660 
661 DECLARE_SPIN_LOCK(smm_relocation_lock);
662 
663 /* Send SMI to self with single user serialization. */
664 void smm_initiate_relocation(void)
665 {
666  spin_lock(&smm_relocation_lock);
667  smm_initiate_relocation_parallel();
668  spin_unlock(&smm_relocation_lock);
669 }
670 
671 struct mp_state {
672  struct mp_ops ops;
673  int cpu_count;
674  uintptr_t perm_smbase;
675  size_t perm_smsize;
676  /* Size of the real CPU save state */
677  size_t smm_real_save_state_size;
678  /* Size of allocated CPU save state, MAX(real save state size, stub size) */
679  size_t smm_save_state_size;
680  uintptr_t reloc_start32_offset;
681  int do_smm;
682 } mp_state;
683 
684 static int is_smm_enabled(void)
685 {
686  return CONFIG(HAVE_SMI_HANDLER) && mp_state.do_smm;
687 }
688 
689 static void smm_disable(void)
690 {
691  mp_state.do_smm = 0;
692 }
693 
694 static void smm_enable(void)
695 {
696  if (CONFIG(HAVE_SMI_HANDLER))
697  mp_state.do_smm = 1;
698 }
699 
700 /*
701  * This code is built as part of ramstage, but it actually runs in SMM. This
702  * means that ENV_SMM is 0, but we are actually executing in the environment
703  * setup by the smm_stub.
704  */
705 static void asmlinkage smm_do_relocation(void *arg)
706 {
707  const struct smm_module_params *p;
708  int cpu;
709  const uintptr_t curr_smbase = SMM_DEFAULT_BASE;
710  uintptr_t perm_smbase;
711 
712  p = arg;
713  cpu = p->cpu;
714 
715  if (cpu >= CONFIG_MAX_CPUS) {
716  printk(BIOS_CRIT,
717  "Invalid CPU number assigned in SMM stub: %d\n", cpu);
718  return;
719  }
720 
721  /*
722  * The permanent handler runs with all cpus concurrently. Precalculate
723  * the location of the new SMBASE. If using SMM modules then this
724  * calculation needs to match that of the module loader.
725  */
726  perm_smbase = smm_get_cpu_smbase(cpu);
727  if (!perm_smbase) {
728  printk(BIOS_ERR, "%s: bad SMBASE for CPU %d\n", __func__, cpu);
729  return;
730  }
731 
732  /* Setup code checks this callback for validity. */
733  printk(BIOS_INFO, "%s : curr_smbase 0x%x perm_smbase 0x%x, cpu = %d\n",
734  __func__, (int)curr_smbase, (int)perm_smbase, cpu);
735  mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);
736 
737  if (CONFIG(STM)) {
738  uintptr_t mseg;
739 
740  mseg = mp_state.perm_smbase +
741  (mp_state.perm_smsize - CONFIG_MSEG_SIZE);
742 
743  stm_setup(mseg, p->cpu,
744  perm_smbase,
745  mp_state.perm_smbase,
746  mp_state.reloc_start32_offset);
747  }
748 }
749 
750 static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
751 {
752  int i;
753  struct smm_stub_params *stub_params = smm_params->stub_params;
754 
755  for (i = 0; i < CONFIG_MAX_CPUS; i++)
756  stub_params->apic_id_to_cpu[i] = cpu_get_apic_id(i);
757 }
758 
759 static enum cb_err install_relocation_handler(int num_cpus, size_t real_save_state_size,
760  size_t save_state_size)
761 {
762  struct smm_loader_params smm_params = {
763  .num_cpus = num_cpus,
764  .real_cpu_save_state_size = real_save_state_size,
765  .per_cpu_save_state_size = save_state_size,
766  .num_concurrent_save_states = 1,
767  .handler = smm_do_relocation,
768  };
769 
770  if (smm_setup_relocation_handler(&smm_params)) {
771  printk(BIOS_ERR, "%s: smm setup failed\n", __func__);
772  return CB_ERR;
773  }
774  adjust_smm_apic_id_map(&smm_params);
775 
776  mp_state.reloc_start32_offset = smm_params.stub_params->start32_offset;
777 
778  return CB_SUCCESS;
779 }
780 
781 static enum cb_err install_permanent_handler(int num_cpus, uintptr_t smbase,
782  size_t smsize, size_t real_save_state_size,
783  size_t save_state_size)
784 {
785  /*
786  * All the CPUs will relocate to the permanent handler now. Set parameters
787  * needed for all CPUs. The placement of each CPU's entry point is
788  * determined by the loader. This code simply provides the beginning of the
789  * SMRAM region, the number of CPUs that will use the handler, the stack
790  * size and save state size for each CPU.
791  */
792  struct smm_loader_params smm_params = {
793  .num_cpus = num_cpus,
794  .real_cpu_save_state_size = real_save_state_size,
795  .per_cpu_save_state_size = save_state_size,
796  .num_concurrent_save_states = num_cpus,
797  };
798 
799  printk(BIOS_DEBUG, "Installing permanent SMM handler to 0x%08lx\n", smbase);
800 
801  if (smm_load_module(smbase, smsize, &smm_params))
802  return CB_ERR;
803 
804  adjust_smm_apic_id_map(&smm_params);
805 
806  return CB_SUCCESS;
807 }
808 
809 /* Load SMM handlers as part of MP flight record. */
810 static void load_smm_handlers(void)
811 {
812  size_t real_save_state_size = mp_state.smm_real_save_state_size;
813  size_t smm_save_state_size = mp_state.smm_save_state_size;
814 
815  /* Do nothing if SMM is disabled.*/
816  if (!is_smm_enabled())
817  return;
818 
819  if (smm_setup_stack(mp_state.perm_smbase, mp_state.perm_smsize, mp_state.cpu_count,
820  CONFIG_SMM_MODULE_STACK_SIZE)) {
821  printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
822  smm_disable();
823  }
824 
825  /* Install handlers. */
826  if (install_relocation_handler(mp_state.cpu_count, real_save_state_size,
827  smm_save_state_size) != CB_SUCCESS) {
828  printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
829  smm_disable();
830  }
831 
832  if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
833  mp_state.perm_smsize, real_save_state_size,
834  smm_save_state_size) != CB_SUCCESS) {
835  printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
836  smm_disable();
837  }
838 
839  /* Ensure the SMM handlers hit DRAM before performing first SMI. */
840  wbinvd();
841 
842  /*
843  * Indicate that the SMM handlers have been loaded and MP
844  * initialization is about to start.
845  */
846  if (is_smm_enabled() && (mp_state.ops.pre_mp_smm_init != NULL))
847  mp_state.ops.pre_mp_smm_init();
848 }
849 
850 /* Trigger SMM as part of MP flight record. */
851 static void trigger_smm_relocation(void)
852 {
853  /* Do nothing if SMM is disabled.*/
854  if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
855  return;
856  /* Trigger SMM mode for the currently running processor. */
857  mp_state.ops.per_cpu_smm_trigger();
858 }
859 
860 static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];
861 
862 static struct mp_callback *read_callback(struct mp_callback **slot)
863 {
864  struct mp_callback *ret;
865 
866  asm volatile ("mov %1, %0\n"
867  : "=r" (ret)
868  : "m" (*slot)
869  : "memory"
870  );
871  return ret;
872 }
873 
874 static void store_callback(struct mp_callback **slot, struct mp_callback *val)
875 {
876  asm volatile ("mov %1, %0\n"
877  : "=m" (*slot)
878  : "r" (val)
879  : "memory"
880  );
881 }
882 
883 static enum cb_err run_ap_work(struct mp_callback *val, long expire_us)
884 {
885  int i;
886  int cpus_accepted;
887  struct stopwatch sw;
888  int cur_cpu;
889 
890  if (!CONFIG(PARALLEL_MP_AP_WORK)) {
891  printk(BIOS_ERR, "APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
892  return CB_ERR;
893  }
894 
895  cur_cpu = cpu_index();
896 
897  if (cur_cpu < 0) {
898  printk(BIOS_ERR, "Invalid CPU index.\n");
899  return CB_ERR;
900  }
901 
902  /* Signal to all the APs to run the func. */
903  for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
904  if (cur_cpu == i)
905  continue;
906  store_callback(&ap_callbacks[i], val);
907  }
908  mfence();
909 
910  /* Wait for all the APs to signal back that call has been accepted. */
911  if (expire_us > 0)
912  stopwatch_init_usecs_expire(&sw, expire_us);
913 
914  do {
915  cpus_accepted = 0;
916 
917  for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
918  if (cur_cpu == i)
919  continue;
920  if (read_callback(&ap_callbacks[i]) == NULL)
921  cpus_accepted++;
922  }
923 
924  if (cpus_accepted == global_num_aps)
925  return CB_SUCCESS;
926  } while (expire_us <= 0 || !stopwatch_expired(&sw));
927 
928  printk(BIOS_CRIT, "CRITICAL ERROR: AP call expired. %d/%d CPUs accepted.\n",
929  cpus_accepted, global_num_aps);
930  return CB_ERR;
931 }
932 
933 static void ap_wait_for_instruction(void)
934 {
935  struct mp_callback lcb;
936  struct mp_callback **per_cpu_slot;
937  int cur_cpu;
938 
939  if (!CONFIG(PARALLEL_MP_AP_WORK))
940  return;
941 
942  cur_cpu = cpu_index();
943 
944  if (cur_cpu < 0) {
945  printk(BIOS_ERR, "Invalid CPU index.\n");
946  return;
947  }
948 
949  per_cpu_slot = &ap_callbacks[cur_cpu];
950 
951  while (1) {
952  struct mp_callback *cb = read_callback(per_cpu_slot);
953 
954  if (cb == NULL) {
955  asm ("pause");
956  continue;
957  }
958 
959  /* Copy to local variable before signaling consumption. */
960  memcpy(&lcb, cb, sizeof(lcb));
961  mfence();
962  store_callback(per_cpu_slot, NULL);
963  if (lcb.logical_cpu_number && (cur_cpu !=
964  lcb.logical_cpu_number))
965  continue;
966  else
967  lcb.func(lcb.arg);
968  }
969 }
970 
971 enum cb_err mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
972  long expire_us)
973 {
974  struct mp_callback lcb = { .func = func, .arg = arg,
975  .logical_cpu_number = logical_cpu_num};
976  return run_ap_work(&lcb, expire_us);
977 }
978 
979 enum cb_err mp_run_on_all_aps(void (*func)(void *), void *arg, long expire_us,
980  bool run_parallel)
981 {
982  int ap_index, bsp_index;
983 
984  if (run_parallel)
985  return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, expire_us);
986 
987  bsp_index = cpu_index();
988 
989  const int total_threads = global_num_aps + 1; /* +1 for BSP */
990 
991  for (ap_index = 0; ap_index < total_threads; ap_index++) {
992  /* skip if BSP */
993  if (ap_index == bsp_index)
994  continue;
995  if (mp_run_on_aps(func, arg, ap_index, expire_us) != CB_SUCCESS)
996  return CB_ERR;
997  }
998 
999  return CB_SUCCESS;
1000 }
1001 
1002 enum cb_err mp_run_on_all_cpus(void (*func)(void *), void *arg)
1003 {
1004  /* Run on BSP first. */
1005  func(arg);
1006 
1007  /* Wait up to 1 second for the APs to finish their previous work. */
1008  return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, 1000 * USECS_PER_MSEC);
1009 }
1010 
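A minimal usage sketch for this API, using only symbols already present in this file plus a hypothetical worker callback:

/* Hypothetical worker: log the LAPIC ID of whichever CPU runs it. */
static void log_lapic_id(void *unused)
{
        printk(BIOS_DEBUG, "cpu index %d has LAPIC ID 0x%x\n", cpu_index(), lapicid());
}

/* Runs on the BSP first, then signals every parked AP and waits up to
 * one second for them to pick the callback up. */
if (mp_run_on_all_cpus(log_lapic_id, NULL) != CB_SUCCESS)
        printk(BIOS_ERR, "Some APs never ran the callback\n");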
1011 enum cb_err mp_park_aps(void)
1012 {
1013  struct stopwatch sw;
1014  enum cb_err ret;
1015  long duration_msecs;
1016 
1017  stopwatch_init(&sw);
1018 
1019  ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS,
1020  1000 * USECS_PER_MSEC);
1021 
1022  duration_msecs = stopwatch_duration_msecs(&sw);
1023 
1024  if (ret == CB_SUCCESS)
1025  printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
1026  duration_msecs);
1027  else
1028  printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
1029  duration_msecs);
1030 
1031  return ret;
1032 }
1033 
1034 static struct mp_flight_record mp_steps[] = {
1035  /* Once the APs are up load the SMM handlers. */
1036  MP_FR_BLOCK_APS(NULL, load_smm_handlers),
1037  /* Perform SMM relocation. */
1038  MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
1039  /* Initialize each CPU through the driver framework. */
1040  MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
1041  /* Wait for APs to finish then optionally start looking for work. */
1042  MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
1043 };
1044 
1045 static size_t smm_stub_size(void)
1046 {
1047  extern unsigned char _binary_smmstub_start[];
1048  struct rmodule smm_stub;
1049 
1050  if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
1051  printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
1052  return 0;
1053  }
1054 
1055  return rmodule_memory_size(&smm_stub);
1056 }
1057 
1058 static void fill_mp_state_smm(struct mp_state *state, const struct mp_ops *ops)
1059 {
1060  if (ops->get_smm_info != NULL)
1061  ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
1062  &state->smm_real_save_state_size);
1063 
1064  state->smm_save_state_size = MAX(state->smm_real_save_state_size, smm_stub_size());
1065 
1066  /*
1067  * Make sure there is enough room for the SMM descriptor
1068  */
1069  if (CONFIG(STM)) {
1070  state->smm_save_state_size +=
1071  ALIGN_UP(sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR), 0x100);
1072  }
1073 
1074  /*
1075  * Default to smm_initiate_relocation() if trigger callback isn't
1076  * provided.
1077  */
1078  if (ops->per_cpu_smm_trigger == NULL)
1079  state->ops.per_cpu_smm_trigger = smm_initiate_relocation;
1080 }
1081 
1082 static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
1083 {
1084  /*
1085  * Make copy of the ops so that defaults can be set in the non-const
1086  * structure if needed.
1087  */
1088  memcpy(&state->ops, ops, sizeof(*ops));
1089 
1090  if (ops->get_cpu_count != NULL)
1091  state->cpu_count = ops->get_cpu_count();
1092 
1093  if (CONFIG(HAVE_SMI_HANDLER))
1094  fill_mp_state_smm(state, ops);
1095 }
1096 
1097 static enum cb_err do_mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
1098 {
1099  enum cb_err ret;
1100  void *default_smm_area;
1101  struct mp_params mp_params;
1102 
1103  if (mp_ops->pre_mp_init != NULL)
1104  mp_ops->pre_mp_init();
1105 
1106  fill_mp_state(&mp_state, mp_ops);
1107 
1108  memset(&mp_params, 0, sizeof(mp_params));
1109 
1110  if (mp_state.cpu_count <= 0) {
1111  printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
1112  return CB_ERR;
1113  }
1114 
1115  /* Sanity check SMM state. */
1116  if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
1117  mp_state.ops.relocation_handler != NULL)
1118  smm_enable();
1119 
1120  if (is_smm_enabled())
1121  printk(BIOS_INFO, "Will perform SMM setup.\n");
1122 
1123  mp_params.num_cpus = mp_state.cpu_count;
1124  /* Gather microcode information. */
1125  if (mp_state.ops.get_microcode_info != NULL)
1126  mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
1127  &mp_params.parallel_microcode_load);
1128  mp_params.flight_plan = &mp_steps[0];
1129  mp_params.num_records = ARRAY_SIZE(mp_steps);
1130 
1131  /* Perform backup of default SMM area. */
1132  default_smm_area = backup_default_smm_area();
1133 
1134  ret = mp_init(cpu_bus, &mp_params);
1135 
1136  restore_default_smm_area(default_smm_area);
1137 
1138  /* Signal callback on success if it's provided. */
1139  if (ret == CB_SUCCESS && mp_state.ops.post_mp_init != NULL)
1140  mp_state.ops.post_mp_init();
1141 
1142  return ret;
1143 }
1144 
1145 enum cb_err mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
1146 {
1147  enum cb_err ret = do_mp_init_with_smm(cpu_bus, mp_ops);
1148 
1149  if (ret != CB_SUCCESS)
1150  printk(BIOS_ERR, "MP initialization failure.\n");
1151 
1152  return ret;
1153 }
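To show the intended entry point, a minimal sketch of how a chipset might drive this code (hypothetical: get_cpu_count_for_soc() and soc_init_cpus() are illustration-only, and a real mp_ops normally also fills the microcode and SMM hooks so that SMM relocation can run):

static int get_cpu_count_for_soc(void)
{
        return 4; /* assumed fixed core count for the illustration */
}

static const struct mp_ops soc_mp_ops = {
        .get_cpu_count = get_cpu_count_for_soc,
};

static void soc_init_cpus(struct bus *cpu_bus)
{
        /* mp_init_with_smm() already logs an error if bring-up fails. */
        if (mp_init_with_smm(cpu_bus, &soc_mp_ops) != CB_SUCCESS)
                printk(BIOS_ERR, "Continuing with the BSP only\n");
}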