coreboot
coreboot is an Open Source project aimed at replacing the proprietary BIOS found in most computers.
mtrr.c
Go to the documentation of this file.
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 
3 /*
4  * mtrr.c: setting MTRR to decent values for cache initialization on P6
5  * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
6  *
7  * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
8  * Programming
9  */
10 
11 #include <stddef.h>
12 #include <string.h>
13 #include <bootstate.h>
14 #include <commonlib/helpers.h>
15 #include <console/console.h>
16 #include <device/device.h>
17 #include <device/pci_ids.h>
18 #include <cpu/cpu.h>
19 #include <cpu/x86/msr.h>
20 #include <cpu/x86/mtrr.h>
21 #include <cpu/x86/cache.h>
22 #include <memrange.h>
23 #include <cpu/amd/mtrr.h>
24 #include <assert.h>
25 #if CONFIG(X86_AMD_FIXED_MTRRS)
26 #define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
27 #else
28 #define MTRR_FIXED_WRBACK_BITS 0
29 #endif
30 
31 #define MIN_MTRRS 8
32 
33 /*
34  * Static storage size for variable MTRRs. It's sized sufficiently large to
35  * handle different types of CPUs. Empirically, 16 variable MTRRs has not
36  * yet been observed.
37  */
38 #define NUM_MTRR_STATIC_STORAGE 16
39 
40 static int total_mtrrs;
41 
42 static void detect_var_mtrrs(void)
43 {
45 
48  "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
51  }
52 }
53 
55 {
56  msr_t msr;
57 
58  msr = rdmsr(MTRR_DEF_TYPE_MSR);
61 }
62 
64 {
65  msr_t syscfg;
66 
67  if (!CONFIG(X86_AMD_FIXED_MTRRS))
68  return;
69 
70  syscfg = rdmsr(SYSCFG_MSR);
72  wrmsr(SYSCFG_MSR, syscfg);
73 }
74 
76 {
77  msr_t syscfg;
78 
79  if (!CONFIG(X86_AMD_FIXED_MTRRS))
80  return;
81 
82  syscfg = rdmsr(SYSCFG_MSR);
84  wrmsr(SYSCFG_MSR, syscfg);
85 }
86 
87 static void enable_var_mtrr(unsigned char deftype)
88 {
89  msr_t msr;
90 
91  msr = rdmsr(MTRR_DEF_TYPE_MSR);
92  msr.lo &= ~0xff;
93  msr.lo |= MTRR_DEF_TYPE_EN | deftype;
95 }
96 
97 #define MTRR_VERBOSE_LEVEL BIOS_NEVER
98 
99 /* MTRRs are at a 4KiB granularity. */
100 #define RANGE_SHIFT 12
101 #define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
102  (((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
103 #define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
104 #define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
105 #define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)
106 
107 /* Helpful constants. */
108 #define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
109 #define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
110 
111 #define MTRR_ALGO_SHIFT (8)
112 #define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
113 
115 {
117 }
118 
120 {
122 }
123 
124 static inline int range_entry_mtrr_type(struct range_entry *r)
125 {
126  return range_entry_tag(r) & MTRR_TAG_MASK;
127 }
128 
129 static int filter_vga_wrcomb(struct device *dev, struct resource *res)
130 {
131  /* Only handle PCI devices. */
132  if (dev->path.type != DEVICE_PATH_PCI)
133  return 0;
134 
135  /* Only handle VGA class devices. */
136  if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
137  return 0;
138 
139  /* Add resource as write-combining in the address space. */
140  return 1;
141 }
142 
143 static void print_physical_address_space(const struct memranges *addr_space,
144  const char *identifier)
145 {
146  const struct range_entry *r;
147 
148  if (identifier)
149  printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
150  identifier);
151  else
152  printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
153 
154  memranges_each_entry(r, addr_space)
156  "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
159 }
160 
162 {
163  static struct memranges *addr_space;
164  static struct memranges addr_space_storage;
165 
166  /* In order to handle some chipsets not being able to pre-determine
167  * uncacheable ranges, such as graphics memory, at resource insertion
168  * time remove uncacheable regions from the cacheable ones. */
169  if (addr_space == NULL) {
170  unsigned long mask;
171  unsigned long match;
172 
173  addr_space = &addr_space_storage;
174 
176  /* Collect cacheable and uncacheable address ranges. The
177  * uncacheable regions take precedence over the cacheable
178  * regions. */
179  memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
180  memranges_add_resources(addr_space, mask, 0,
182 
183  /* Handle any write combining resources. Only prefetchable
184  * resources are appropriate for this MTRR type. */
185  match = IORESOURCE_PREFETCH;
186  mask |= match;
187  memranges_add_resources_filter(addr_space, mask, match,
189 
190  /* The address space below 4GiB is special. It needs to be
191  * covered entirely by range entries so that MTRR calculations
192  * can be properly done for the full 32-bit address space.
193  * Therefore, ensure holes are filled up to 4GiB as
194  * uncacheable */
195  memranges_fill_holes_up_to(addr_space,
198 
199  print_physical_address_space(addr_space, NULL);
200  }
201 
202  return addr_space;
203 }
204 
205 /* Fixed MTRR descriptor. This structure defines the step size and begin
206  * and end (exclusive) address covered by a set of fixed MTRR MSRs.
207  * It also describes the offset in byte intervals to store the calculated MTRR
208  * type in an array. */
215 };
216 
217 /* Shared MTRR calculations. Can be reused by APs. */
219 
/* Fixed MTRR descriptors covering the legacy sub-1MiB region:
 * 0-512KiB in 64KiB steps, 512KiB-768KiB in 16KiB steps, and
 * 768KiB-1MiB in 4KiB steps. The 4th field (0/8/24) is the starting
 * index into the per-range type array for each descriptor. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
229 
230 static void calc_fixed_mtrrs(void)
231 {
232  static int fixed_mtrr_types_initialized;
233  struct memranges *phys_addr_space;
234  struct range_entry *r;
235  const struct fixed_mtrr_desc *desc;
236  const struct fixed_mtrr_desc *last_desc;
237  uint32_t begin;
238  uint32_t end;
239  int type_index;
240 
241  if (fixed_mtrr_types_initialized)
242  return;
243 
244  phys_addr_space = get_physical_address_space();
245 
246  /* Set all fixed ranges to uncacheable first. */
248 
249  desc = &fixed_mtrr_desc[0];
250  last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
251 
252  memranges_each_entry(r, phys_addr_space) {
255 
256  if (begin >= last_desc->end)
257  break;
258 
259  if (end > last_desc->end)
260  end = last_desc->end;
261 
262  /* Get to the correct fixed mtrr descriptor. */
263  while (begin >= desc->end)
264  desc++;
265 
266  type_index = desc->range_index;
267  type_index += (begin - desc->begin) / desc->step;
268 
269  while (begin != end) {
270  unsigned char type;
271 
272  type = range_entry_tag(r);
274  "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
275  begin, begin + desc->step - 1, type, type_index);
276  if (type == MTRR_TYPE_WRBACK)
278  fixed_mtrr_types[type_index] = type;
279  type_index++;
280  begin += desc->step;
281  if (begin == desc->end)
282  desc++;
283  }
284  }
285  fixed_mtrr_types_initialized = 1;
286 }
287 
288 static void commit_fixed_mtrrs(void)
289 {
290  int i;
291  int j;
292  int msr_num;
293  int type_index;
294  /* 8 ranges per msr. */
295  msr_t fixed_msrs[NUM_FIXED_MTRRS];
296  unsigned long msr_index[NUM_FIXED_MTRRS];
297 
299 
300  memset(&fixed_msrs, 0, sizeof(fixed_msrs));
301 
302  msr_num = 0;
303  type_index = 0;
304  for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
305  const struct fixed_mtrr_desc *desc;
306  int num_ranges;
307 
308  desc = &fixed_mtrr_desc[i];
309  num_ranges = (desc->end - desc->begin) / desc->step;
310  for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
311  msr_index[msr_num] = desc->msr_index_base +
312  (j / RANGES_PER_FIXED_MTRR);
313  fixed_msrs[msr_num].lo |=
314  fixed_mtrr_types[type_index++] << 0;
315  fixed_msrs[msr_num].lo |=
316  fixed_mtrr_types[type_index++] << 8;
317  fixed_msrs[msr_num].lo |=
318  fixed_mtrr_types[type_index++] << 16;
319  fixed_msrs[msr_num].lo |=
320  fixed_mtrr_types[type_index++] << 24;
321  fixed_msrs[msr_num].hi |=
322  fixed_mtrr_types[type_index++] << 0;
323  fixed_msrs[msr_num].hi |=
324  fixed_mtrr_types[type_index++] << 8;
325  fixed_msrs[msr_num].hi |=
326  fixed_mtrr_types[type_index++] << 16;
327  fixed_msrs[msr_num].hi |=
328  fixed_mtrr_types[type_index++] << 24;
329  msr_num++;
330  }
331  }
332 
333  /* Ensure that both arrays were fully initialized */
334  ASSERT(msr_num == NUM_FIXED_MTRRS)
335 
336  for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
337  printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
338  msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
339 
340  disable_cache();
341  for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
342  wrmsr(msr_index[i], fixed_msrs[i]);
343  enable_cache();
345 
346 }
347 
349 {
352 }
353 
355 {
357 
358  printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
360 }
361 
365 };
366 
369  int num_used;
371 };
372 
373 /* Global storage for variable MTRR solution. */
375 
378  int above4gb;
384 };
385 
386 static void clear_var_mtrr(int index)
387 {
388  msr_t msr = { .lo = 0, .hi = 0 };
389 
390  wrmsr(MTRR_PHYS_BASE(index), msr);
391  wrmsr(MTRR_PHYS_MASK(index), msr);
392 }
393 
394 static int get_os_reserved_mtrrs(void)
395 {
396  return CONFIG(RESERVE_MTRRS_FOR_OS) ? 2 : 0;
397 }
398 
399 static void prep_var_mtrr(struct var_mtrr_state *var_state,
400  uint64_t base, uint64_t size, int mtrr_type)
401 {
402  struct var_mtrr_regs *regs;
403  resource_t rbase;
404  resource_t rsize;
406 
407  if (var_state->mtrr_index >= total_mtrrs) {
408  printk(BIOS_ERR, "Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
409  var_state->mtrr_index, total_mtrrs);
410  return;
411  }
412 
413  /*
414  * If desired, 2 variable MTRRs are attempted to be saved for the OS to
415  * use. However, it's more important to try to map the full address
416  * space properly.
417  */
418  if (var_state->mtrr_index >= total_mtrrs - get_os_reserved_mtrrs())
419  printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
420 
421  rbase = base;
422  rsize = size;
423 
424  rbase = RANGE_TO_PHYS_ADDR(rbase);
425  rsize = RANGE_TO_PHYS_ADDR(rsize);
426  rsize = -rsize;
427 
428  mask = (1ULL << var_state->address_bits) - 1;
429  rsize = rsize & mask;
430 
431  printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
432  var_state->mtrr_index, rbase, rsize, mtrr_type);
433 
434  regs = &var_state->regs[var_state->mtrr_index];
435 
436  regs->base.lo = rbase;
437  regs->base.lo |= mtrr_type;
438  regs->base.hi = rbase >> 32;
439 
440  regs->mask.lo = rsize;
441  regs->mask.lo |= MTRR_PHYS_MASK_VALID;
442  regs->mask.hi = rsize >> 32;
443 }
444 
445 /*
446  * fls64: find least significant bit set in a 64-bit word
447  * As samples, fls64(0x0) = 64; fls64(0x4400) = 10;
448  * fls64(0x40400000000) = 34.
449  */
451 {
452  uint32_t lo = (uint32_t)x;
453  if (lo)
454  return fls(lo);
455  uint32_t hi = x >> 32;
456  return fls(hi) + 32;
457 }
458 
459 /*
460  * fms64: find most significant bit set in a 64-bit word
461  * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
462  * fms64(0x40400000000) = 42.
463  */
465 {
466  uint32_t hi = (uint32_t)(x >> 32);
467  if (!hi)
468  return fms((uint32_t)x);
469  return fms(hi) + 32;
470 }
471 
472 static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
473  uint64_t base, uint64_t size, int mtrr_type)
474 {
475  while (size != 0) {
476  uint32_t addr_lsb;
477  uint32_t size_msb;
478  uint64_t mtrr_size;
479 
480  addr_lsb = fls64(base);
481  size_msb = fms64(size);
482 
483  /* All MTRR entries need to have their base aligned to the mask
484  * size. The maximum size is calculated by a function of the
485  * min base bit set and maximum size bit set. */
486  if (addr_lsb > size_msb)
487  mtrr_size = 1ULL << size_msb;
488  else
489  mtrr_size = 1ULL << addr_lsb;
490 
491  if (var_state->prepare_msrs)
492  prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);
493 
494  size -= mtrr_size;
495  base += mtrr_size;
496  var_state->mtrr_index++;
497  }
498 }
499 
501  const uint64_t hole,
502  const uint64_t limit,
503  const int carve_hole)
504 {
505  /*
506  * With default type UC, we can potentially optimize a WB
507  * range with unaligned upper end, by aligning it up and
508  * carving the added "hole" out again.
509  *
510  * To optimize the upper end of the hole, we will test
511  * how many MTRRs calc_var_mtrr_range() will spend for any
512  * alignment of the hole's upper end.
513  *
514  * We take four parameters, the lower end of the WB range
515  * `base`, upper end of the WB range as start of the `hole`,
516  * a `limit` how far we may align the upper end of the hole
517  * up and a flag `carve_hole` whether we should count MTRRs
518  * for carving the hole out. We return the optimal upper end
519  * for the hole (which may be the same as the end of the WB
520  * range in case we don't gain anything by aligning up).
521  */
522 
523  const int dont_care = 0;
524  struct var_mtrr_state var_state = { 0, };
525 
526  unsigned int align, best_count;
527  uint32_t best_end = hole;
528 
529  /* calculate MTRR count for the WB range alone (w/o a hole) */
530  calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
531  best_count = var_state.mtrr_index;
532  var_state.mtrr_index = 0;
533 
534  for (align = fls(hole) + 1; align <= fms(hole); ++align) {
535  const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
536  if (hole_end > limit)
537  break;
538 
539  /* calculate MTRR count for this alignment */
541  &var_state, base, hole_end - base, dont_care);
542  if (carve_hole)
544  &var_state, hole, hole_end - hole, dont_care);
545 
546  if (var_state.mtrr_index < best_count) {
547  best_count = var_state.mtrr_index;
548  best_end = hole_end;
549  }
550  var_state.mtrr_index = 0;
551  }
552 
553  return best_end;
554 }
555 
556 static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
557  struct range_entry *r)
558 {
559  uint64_t a1, a2, b1, b2;
560  int mtrr_type, carve_hole;
561 
562  /*
563  * Determine MTRRs based on the following algorithm for the given entry:
564  * +------------------+ b2 = ALIGN_UP(end)
565  * | 0 or more bytes | <-- hole is carved out between b1 and b2
566  * +------------------+ a2 = b1 = original end
567  * | |
568  * +------------------+ a1 = begin
569  *
570  * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
571  */
572  mtrr_type = range_entry_mtrr_type(r);
573 
576 
577  /* The end address is within the first 1MiB. The fixed MTRRs take
578  * precedence over the variable ones. Therefore this range
579  * can be ignored. */
580  if (a2 <= RANGE_1MB)
581  return;
582 
583  /* Again, the fixed MTRRs take precedence so the beginning
584  * of the range can be set to 0 if it starts at or below 1MiB. */
585  if (a1 <= RANGE_1MB)
586  a1 = 0;
587 
588  /* If the range starts above 4GiB the processing is done. */
589  if (!var_state->above4gb && a1 >= RANGE_4GB)
590  return;
591 
592  /* Clip the upper address to 4GiB if addresses above 4GiB
593  * are not being processed. */
594  if (!var_state->above4gb && a2 > RANGE_4GB)
595  a2 = RANGE_4GB;
596 
597  b1 = a2;
598  b2 = a2;
599  carve_hole = 0;
600 
601  /* We only consider WB type ranges for hole-carving. */
602  if (mtrr_type == MTRR_TYPE_WRBACK) {
603  struct range_entry *next;
604  uint64_t b2_limit;
605  /*
606  * Depending on the type of the next range, there are three
607  * different situations to handle:
608  *
609  * 1. WB range is last in address space:
610  * Aligning up, up to the next power of 2, may gain us
611  * something.
612  *
613  * 2. The next range is of type UC:
614  * We may align up, up to the _end_ of the next range. If
615  * there is a gap between the current and the next range,
616  * it would have been covered by the default type UC anyway.
617  *
618  * 3. The next range is not of type UC:
619  * We may align up, up to the _base_ of the next range. This
620  * may either be the end of the current range (if the next
621  * range follows immediately) or the end of the gap between
622  * the ranges.
623  */
624  next = memranges_next_entry(var_state->addr_space, r);
625  if (next == NULL) {
626  b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
627  /* If it's the last range above 4GiB, we won't carve
628  the hole out. If an OS wanted to move MMIO there,
629  it would have to override the MTRR setting using
630  PAT just like it would with WB as default type. */
631  carve_hole = a1 < RANGE_4GB;
632  } else if (range_entry_mtrr_type(next)
634  b2_limit = range_entry_end_mtrr_addr(next);
635  carve_hole = 1;
636  } else {
637  b2_limit = range_entry_base_mtrr_addr(next);
638  carve_hole = 1;
639  }
640  b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
641  }
642 
643  calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
644  if (carve_hole && b2 != b1) {
645  calc_var_mtrr_range(var_state, b1, b2 - b1,
647  }
648 }
649 
650 static void __calc_var_mtrrs(struct memranges *addr_space,
651  int above4gb, int address_bits,
652  int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
653 {
654  int wb_deftype_count;
655  int uc_deftype_count;
656  struct range_entry *r;
657  struct var_mtrr_state var_state;
658 
659  /* The default MTRR cacheability type is determined by calculating
660  * the number of MTRRs required for each MTRR type as if it was the
661  * default. */
662  var_state.addr_space = addr_space;
663  var_state.above4gb = above4gb;
664  var_state.address_bits = address_bits;
665  var_state.prepare_msrs = 0;
666 
667  wb_deftype_count = 0;
668  uc_deftype_count = 0;
669 
670  /*
671  * For each range do 2 calculations:
672  * 1. UC as default type with possible holes at top of range.
673  * 2. WB as default.
674  * The lowest count is then used as default after totaling all
675  * MTRRs. UC takes precedence in the MTRR architecture. There-
676  * fore, only holes can be used when the type of the region is
677  * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
678  * type.
679  */
680  memranges_each_entry(r, var_state.addr_space) {
681  int mtrr_type;
682 
683  mtrr_type = range_entry_mtrr_type(r);
684 
685  if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
686  var_state.mtrr_index = 0;
688  calc_var_mtrrs_with_hole(&var_state, r);
689  uc_deftype_count += var_state.mtrr_index;
690  }
691 
692  if (mtrr_type != MTRR_TYPE_WRBACK) {
693  var_state.mtrr_index = 0;
694  var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
695  calc_var_mtrrs_with_hole(&var_state, r);
696  wb_deftype_count += var_state.mtrr_index;
697  }
698  }
699 
700  *num_def_wb_mtrrs = wb_deftype_count;
701  *num_def_uc_mtrrs = uc_deftype_count;
702 }
703 
705  int above4gb, int address_bits)
706 {
707  int wb_deftype_count = 0;
708  int uc_deftype_count = 0;
709 
710  __calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
711  &uc_deftype_count);
712 
713  const int bios_mtrrs = total_mtrrs - get_os_reserved_mtrrs();
714  if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
715  printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
716  "WB/UC MTRR counts: %d/%d > %d.\n",
717  wb_deftype_count, uc_deftype_count, bios_mtrrs);
721  &wb_deftype_count, &uc_deftype_count);
722  }
723 
724  printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
725  wb_deftype_count, uc_deftype_count);
726 
727  if (wb_deftype_count < uc_deftype_count) {
728  printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
729  return MTRR_TYPE_WRBACK;
730  }
731  printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
732  return MTRR_TYPE_UNCACHEABLE;
733 }
734 
735 static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
736  int above4gb, int address_bits,
737  struct var_mtrr_solution *sol)
738 {
739  struct range_entry *r;
740  struct var_mtrr_state var_state;
741 
742  var_state.addr_space = addr_space;
743  var_state.above4gb = above4gb;
744  var_state.address_bits = address_bits;
745  /* Prepare the MSRs. */
746  var_state.prepare_msrs = 1;
747  var_state.mtrr_index = 0;
748  var_state.def_mtrr_type = def_type;
749  var_state.regs = &sol->regs[0];
750 
751  memranges_each_entry(r, var_state.addr_space) {
752  if (range_entry_mtrr_type(r) == def_type)
753  continue;
754  calc_var_mtrrs_with_hole(&var_state, r);
755  }
756 
757  /* Update the solution. */
758  sol->num_used = var_state.mtrr_index;
759 }
760 
761 static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
762 {
763  int i;
764 
765  if (sol->num_used > total_mtrrs) {
766  printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
767  sol->num_used, total_mtrrs);
768  return -1;
769  }
770 
771  /* Write out the variable MTRRs. */
772  disable_cache();
773  for (i = 0; i < sol->num_used; i++) {
774  wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
775  wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
776  }
777  /* Clear the ones that are unused. */
778  for (; i < total_mtrrs; i++)
779  clear_var_mtrr(i);
781  enable_cache();
782 
783  return 0;
784 }
785 
786 void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
787 {
788  static struct var_mtrr_solution *sol = NULL;
789  struct memranges *addr_space;
790 
791  addr_space = get_physical_address_space();
792 
793  if (sol == NULL) {
794  sol = &mtrr_global_solution;
795  sol->mtrr_default_type =
796  calc_var_mtrrs(addr_space, !!above4gb, address_bits);
797  prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
798  !!above4gb, address_bits, sol);
799  }
800 
801  commit_var_mtrrs(sol);
802 }
803 
804 static void _x86_setup_mtrrs(unsigned int above4gb)
805 {
806  int address_size;
807 
809  address_size = cpu_phys_address_size();
810  printk(BIOS_DEBUG, "CPU physical address size: %d bits\n",
811  address_size);
812  x86_setup_var_mtrrs(address_size, above4gb);
813 }
814 
815 void x86_setup_mtrrs(void)
816 {
817  /* Without detect, assume the minimum */
819  /* Always handle addresses above 4GiB. */
820  _x86_setup_mtrrs(1);
821 }
822 
824 {
826  /* Always handle addresses above 4GiB. */
827  _x86_setup_mtrrs(1);
828 }
829 
831 {
833  _x86_setup_mtrrs(0);
834 }
835 
836 void x86_mtrr_check(void)
837 {
838  /* Only Pentium Pro and later have MTRR */
839  msr_t msr;
840  printk(BIOS_DEBUG, "\nMTRR check\n");
841 
842  msr = rdmsr(MTRR_DEF_TYPE_MSR);
843 
844  printk(BIOS_DEBUG, "Fixed MTRRs : ");
845  if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
846  printk(BIOS_DEBUG, "Enabled\n");
847  else
848  printk(BIOS_DEBUG, "Disabled\n");
849 
850  printk(BIOS_DEBUG, "Variable MTRRs: ");
851  if (msr.lo & MTRR_DEF_TYPE_EN)
852  printk(BIOS_DEBUG, "Enabled\n");
853  else
854  printk(BIOS_DEBUG, "Disabled\n");
855 
856  printk(BIOS_DEBUG, "\n");
857 
858  post_code(0x93);
859 }
860 
862 
864 {
866 }
867 
868 void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
869 {
870  const struct range_entry *r;
871  const struct memranges *orig;
872  struct var_mtrr_solution sol;
873  struct memranges addr_space;
874  const int above4gb = 1; /* Cover above 4GiB by default. */
875  int address_bits;
876 
877  /* Make a copy of the original address space and tweak it with the
878  * provided range. */
879  memranges_init_empty(&addr_space, NULL, 0);
881  memranges_each_entry(r, orig) {
882  unsigned long tag = range_entry_tag(r);
883 
884  /* Remove any write combining MTRRs from the temporary
885  * solution as it just fragments the address space. */
886  if (tag == MTRR_TYPE_WRCOMB)
887  tag = MTRR_TYPE_UNCACHEABLE;
888 
889  memranges_insert(&addr_space, range_entry_base(r),
890  range_entry_size(r), tag);
891  }
892 
893  /* Place new range into the address space. */
894  memranges_insert(&addr_space, begin, size, type);
895 
896  print_physical_address_space(&addr_space, "TEMPORARY");
897 
898  /* Calculate a new solution with the updated address space. */
899  address_bits = cpu_phys_address_size();
900  memset(&sol, 0, sizeof(sol));
901  sol.mtrr_default_type =
902  calc_var_mtrrs(&addr_space, above4gb, address_bits);
903  prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
904  above4gb, address_bits, &sol);
905 
906  if (commit_var_mtrrs(&sol) < 0)
907  printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
908  (long long)begin, (long long)begin + size - 1,
909  (long long)size, type);
910  else
912 
913  memranges_teardown(&addr_space);
914 }
915 
916 static void remove_temp_solution(void *unused)
917 {
920 }
921 
#define SYSCFG_MSR_MtrrFixDramModEn
Definition: mtrr.h:16
#define SYSCFG_MSR
Definition: mtrr.h:12
void * memset(void *dstpp, int c, size_t len)
Definition: memset.c:12
#define ASSERT(x)
Definition: assert.h:44
@ BS_PAYLOAD_BOOT
Definition: bootstate.h:89
@ BS_OS_RESUME
Definition: bootstate.h:86
@ BS_ON_ENTRY
Definition: bootstate.h:95
#define ARRAY_SIZE(a)
Definition: helpers.h:12
#define ALIGN_UP(x, a)
Definition: helpers.h:17
#define printk(level,...)
Definition: stdlib.h:16
static void prep_var_mtrr(struct var_mtrr_state *var_state, uint64_t base, uint64_t size, int mtrr_type)
Definition: mtrr.c:399
static uint32_t fms64(uint64_t x)
Definition: mtrr.c:464
static bool put_back_original_solution
Definition: mtrr.c:861
void fixed_mtrrs_hide_amd_rwdram(void)
Definition: mtrr.c:75
static uint64_t optimize_var_mtrr_hole(const uint64_t base, const uint64_t hole, const uint64_t limit, const int carve_hole)
Definition: mtrr.c:500
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES]
Definition: mtrr.c:218
void enable_fixed_mtrr(void)
Definition: mtrr.c:54
#define MTRR_VERBOSE_LEVEL
Definition: mtrr.c:97
void need_restore_mtrr(void)
Definition: mtrr.c:863
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
Definition: mtrr.c:786
static void __calc_var_mtrrs(struct memranges *addr_space, int above4gb, int address_bits, int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
Definition: mtrr.c:650
static void detect_var_mtrrs(void)
Definition: mtrr.c:42
static int total_mtrrs
Definition: mtrr.c:40
static int calc_var_mtrrs(struct memranges *addr_space, int above4gb, int address_bits)
Definition: mtrr.c:704
void x86_mtrr_check(void)
Definition: mtrr.c:836
static void prepare_var_mtrrs(struct memranges *addr_space, int def_type, int above4gb, int address_bits, struct var_mtrr_solution *sol)
Definition: mtrr.c:735
void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
Definition: mtrr.c:868
static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
Definition: mtrr.c:761
#define RANGE_1MB
Definition: mtrr.c:108
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL)
#define MIN_MTRRS
Definition: mtrr.c:31
static uint64_t range_entry_base_mtrr_addr(struct range_entry *r)
Definition: mtrr.c:114
static int get_os_reserved_mtrrs(void)
Definition: mtrr.c:394
static void calc_fixed_mtrrs(void)
Definition: mtrr.c:230
static void remove_temp_solution(void *unused)
Definition: mtrr.c:916
void x86_setup_mtrrs_with_detect_no_above_4gb(void)
Definition: mtrr.c:830
static void commit_fixed_mtrrs(void)
Definition: mtrr.c:288
static int filter_vga_wrcomb(struct device *dev, struct resource *res)
Definition: mtrr.c:129
#define RANGE_TO_PHYS_ADDR(x)
Definition: mtrr.c:104
void x86_setup_fixed_mtrrs_no_enable(void)
Definition: mtrr.c:348
static uint64_t range_entry_end_mtrr_addr(struct range_entry *r)
Definition: mtrr.c:119
#define MTRR_TAG_MASK
Definition: mtrr.c:112
#define NUM_FIXED_MTRRS
Definition: mtrr.c:105
static int range_entry_mtrr_type(struct range_entry *r)
Definition: mtrr.c:124
void x86_setup_mtrrs_with_detect(void)
Definition: mtrr.c:823
static struct var_mtrr_solution mtrr_global_solution
Definition: mtrr.c:374
#define MTRR_FIXED_WRBACK_BITS
Definition: mtrr.c:28
#define PHYS_TO_RANGE_ADDR(x)
Definition: mtrr.c:103
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state, struct range_entry *r)
Definition: mtrr.c:556
#define NUM_MTRR_STATIC_STORAGE
Definition: mtrr.c:38
static uint32_t fls64(uint64_t x)
Definition: mtrr.c:450
void fixed_mtrrs_expose_amd_rwdram(void)
Definition: mtrr.c:63
static void enable_var_mtrr(unsigned char deftype)
Definition: mtrr.c:87
#define RANGE_4GB
Definition: mtrr.c:109
void x86_setup_mtrrs(void)
Definition: mtrr.c:815
static struct memranges * get_physical_address_space(void)
Definition: mtrr.c:161
static void clear_var_mtrr(int index)
Definition: mtrr.c:386
static void print_physical_address_space(const struct memranges *addr_space, const char *identifier)
Definition: mtrr.c:143
void x86_setup_fixed_mtrrs(void)
Definition: mtrr.c:354
static void calc_var_mtrr_range(struct var_mtrr_state *var_state, uint64_t base, uint64_t size, int mtrr_type)
Definition: mtrr.c:472
static void _x86_setup_mtrrs(unsigned int above4gb)
Definition: mtrr.c:804
int cpu_phys_address_size(void)
Definition: cpu_common.c:46
@ CONFIG
Definition: dsi_common.h:201
static __always_inline void enable_cache(void)
Definition: cache.h:40
static __always_inline void disable_cache(void)
Definition: cache.h:48
static __always_inline msr_t rdmsr(unsigned int index)
Definition: msr.h:146
static __always_inline void wrmsr(unsigned int index, msr_t msr)
Definition: msr.h:157
unsigned int type
Definition: edid.c:57
int x
Definition: edid.c:994
#define BIOS_DEBUG
BIOS_DEBUG - Verbose output.
Definition: loglevel.h:128
#define BIOS_ERR
BIOS_ERR - System in incomplete state.
Definition: loglevel.h:72
#define BIOS_SPEW
BIOS_SPEW - Excessively verbose output.
Definition: loglevel.h:142
#define BIOS_WARNING
BIOS_WARNING - Bad configuration.
Definition: loglevel.h:86
static resource_t range_entry_base(const struct range_entry *r)
Definition: memrange.h:44
void memranges_fill_holes_up_to(struct memranges *ranges, resource_t limit, unsigned long tag)
Definition: memrange.c:329
static resource_t range_entry_end(const struct range_entry *r)
Definition: memrange.h:50
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag, unsigned long new_tag)
Definition: memrange.c:194
#define memranges_each_entry(r, ranges)
Definition: memrange.h:82
static unsigned long range_entry_tag(const struct range_entry *r)
Definition: memrange.h:61
void memranges_teardown(struct memranges *ranges)
Definition: memrange.c:321
void memranges_insert(struct memranges *ranges, resource_t base, resource_t size, unsigned long tag)
Definition: memrange.c:236
#define memranges_init_empty(__ranges, __free, __num_free)
Definition: memrange.h:102
struct range_entry * memranges_next_entry(struct memranges *ranges, const struct range_entry *r)
Definition: memrange.c:373
static resource_t range_entry_size(const struct range_entry *r)
Definition: memrange.h:56
#define memranges_init(__ranges, __mask, __match, __tag)
Definition: memrange.h:108
void memranges_add_resources(struct memranges *ranges, unsigned long mask, unsigned long match, unsigned long tag)
Definition: memrange.c:276
void memranges_add_resources_filter(struct memranges *ranges, unsigned long mask, unsigned long match, unsigned long tag, memrange_filter_t filter)
Definition: memrange.c:259
@ DEVICE_PATH_PCI
Definition: path.h:9
#define PCI_CLASS_DISPLAY_VGA
Definition: pci_ids.h:35
#define post_code(value)
Definition: post_code.h:12
#define IORESOURCE_CACHEABLE
Definition: resource.h:19
u64 resource_t
Definition: resource.h:43
#define IORESOURCE_PREFETCH
Definition: resource.h:17
uintptr_t base
Definition: uart.c:17
static const int mask[4]
Definition: gpio.c:308
#define NULL
Definition: stddef.h:19
unsigned int uint32_t
Definition: stdint.h:14
unsigned long uintptr_t
Definition: stdint.h:21
unsigned long long uint64_t
Definition: stdint.h:17
unsigned char uint8_t
Definition: stdint.h:8
enum device_path_type type
Definition: path.h:114
Definition: device.h:107
struct device_path path
Definition: device.h:115
unsigned int class
Definition: device.h:120
int range_index
Definition: mtrr.c:213
uint32_t step
Definition: mtrr.c:212
int msr_index_base
Definition: mtrr.c:214
uint32_t begin
Definition: mtrr.c:210
uint32_t end
Definition: mtrr.c:211
unsigned int hi
Definition: msr.h:112
unsigned int lo
Definition: msr.h:111
Definition: memrange.h:24
struct range_entry * next
Definition: memrange.h:28
msr_t mask
Definition: mtrr.c:364
msr_t base
Definition: mtrr.c:363
struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE]
Definition: mtrr.c:370
int mtrr_default_type
Definition: mtrr.c:368
int address_bits
Definition: mtrr.c:379
int above4gb
Definition: mtrr.c:378
int mtrr_index
Definition: mtrr.c:381
struct memranges * addr_space
Definition: mtrr.c:377
int prepare_msrs
Definition: mtrr.c:380
int def_mtrr_type
Definition: mtrr.c:382
struct var_mtrr_regs * regs
Definition: mtrr.c:383
static struct am335x_pinmux_regs * regs
Definition: pinmux.c:7
#define NUM_FIXED_RANGES
Definition: mtrr.h:43
#define MTRR_FIX_64K_00000
Definition: mtrr.h:45
#define RANGES_PER_FIXED_MTRR
Definition: mtrr.h:44
static unsigned int fms(unsigned int x)
Definition: mtrr.h:156
#define MTRR_PHYS_BASE(reg)
Definition: mtrr.h:39
static unsigned int fls(unsigned int x)
Definition: mtrr.h:168
#define MTRR_PHYS_MASK(reg)
Definition: mtrr.h:40
static int get_var_mtrr_count(void)
Definition: mtrr.h:105
#define MTRR_DEF_TYPE_EN
Definition: mtrr.h:27
#define MTRR_DEF_TYPE_FIX_EN
Definition: mtrr.h:28
#define MTRR_TYPE_WRCOMB
Definition: mtrr.h:11
#define MTRR_FIX_4K_C0000
Definition: mtrr.h:48
#define MTRR_TYPE_UNCACHEABLE
Definition: mtrr.h:10
#define MTRR_FIX_16K_80000
Definition: mtrr.h:46
#define MTRR_DEF_TYPE_MSR
Definition: mtrr.h:25
#define MTRR_TYPE_WRBACK
Definition: mtrr.h:14
#define MTRR_PHYS_MASK_VALID
Definition: mtrr.h:41