spi.c
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* NVIDIA Tegra SPI controller (T114 and later) */
3 
4 #include <arch/cache.h>
5 #include <device/mmio.h>
6 #include <assert.h>
7 #include <console/console.h>
8 #include <delay.h>
9 #include <soc/addressmap.h>
10 #include <soc/dma.h>
11 #include <soc/spi.h>
12 #include <spi-generic.h>
13 #include <spi_flash.h>
14 #include <timer.h>
15 #include <types.h>
16 
17 #if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
18 # define DEBUG_SPI(x,...) printk(BIOS_DEBUG, "TEGRA_SPI: " x, ##__VA_ARGS__)
19 #else
20 # define DEBUG_SPI(x,...)
21 #endif
22 
23 /*
24  * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
25  * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
26  */
27 #define SPI_PACKET_SIZE_BYTES 1
28 #define SPI_MAX_TRANSFER_BYTES_FIFO (64 * SPI_PACKET_SIZE_BYTES)
29 #define SPI_MAX_TRANSFER_BYTES_DMA (65535 * SPI_PACKET_SIZE_BYTES)
30 
31 /*
32  * This is used to work around an issue seen where it may take some time for
33  * packets to show up in the FIFO after they have been received and the
34  * BLOCK_COUNT has been incremented.
35  */
36 #define SPI_FIFO_XFER_TIMEOUT_US 1000
37 
38 /* COMMAND1 */
39 #define SPI_CMD1_GO (1 << 31)
40 #define SPI_CMD1_M_S (1 << 30)
41 #define SPI_CMD1_MODE_MASK 0x3
42 #define SPI_CMD1_MODE_SHIFT 28
43 #define SPI_CMD1_CS_SEL_MASK 0x3
44 #define SPI_CMD1_CS_SEL_SHIFT 26
45 #define SPI_CMD1_CS_POL_INACTIVE3 (1 << 25)
46 #define SPI_CMD1_CS_POL_INACTIVE2 (1 << 24)
47 #define SPI_CMD1_CS_POL_INACTIVE1 (1 << 23)
48 #define SPI_CMD1_CS_POL_INACTIVE0 (1 << 22)
49 #define SPI_CMD1_CS_SW_HW (1 << 21)
50 #define SPI_CMD1_CS_SW_VAL (1 << 20)
51 #define SPI_CMD1_IDLE_SDA_MASK 0x3
52 #define SPI_CMD1_IDLE_SDA_SHIFT 18
53 #define SPI_CMD1_BIDIR (1 << 17)
54 #define SPI_CMD1_LSBI_FE (1 << 16)
55 #define SPI_CMD1_LSBY_FE (1 << 15)
56 #define SPI_CMD1_BOTH_EN_BIT (1 << 14)
57 #define SPI_CMD1_BOTH_EN_BYTE (1 << 13)
58 #define SPI_CMD1_RX_EN (1 << 12)
59 #define SPI_CMD1_TX_EN (1 << 11)
60 #define SPI_CMD1_PACKED (1 << 5)
61 #define SPI_CMD1_BIT_LEN_MASK 0x1f
62 #define SPI_CMD1_BIT_LEN_SHIFT 0
63 
64 /* COMMAND2 */
65 #define SPI_CMD2_TX_CLK_TAP_DELAY (1 << 6)
66 #define SPI_CMD2_TX_CLK_TAP_DELAY_MASK (0x3F << 6)
67 #define SPI_CMD2_RX_CLK_TAP_DELAY (1 << 0)
68 #define SPI_CMD2_RX_CLK_TAP_DELAY_MASK (0x3F << 0)
69 
70 /* SPI_TRANS_STATUS */
71 #define SPI_STATUS_RDY (1 << 30)
72 #define SPI_STATUS_SLV_IDLE_COUNT_MASK 0xff
73 #define SPI_STATUS_SLV_IDLE_COUNT_SHIFT 16
74 #define SPI_STATUS_BLOCK_COUNT 0xffff
75 #define SPI_STATUS_BLOCK_COUNT_SHIFT 0
76 
77 /* SPI_FIFO_STATUS */
78 #define SPI_FIFO_STATUS_CS_INACTIVE (1 << 31)
79 #define SPI_FIFO_STATUS_FRAME_END (1 << 30)
80 #define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK 0x7f
81 #define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT 23
82 #define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK 0x7f
83 #define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT 16
84 #define SPI_FIFO_STATUS_RX_FIFO_FLUSH (1 << 15)
85 #define SPI_FIFO_STATUS_TX_FIFO_FLUSH (1 << 14)
86 #define SPI_FIFO_STATUS_ERR (1 << 8)
87 #define SPI_FIFO_STATUS_TX_FIFO_OVF (1 << 7)
88 #define SPI_FIFO_STATUS_TX_FIFO_UNR (1 << 6)
89 #define SPI_FIFO_STATUS_RX_FIFO_OVF (1 << 5)
90 #define SPI_FIFO_STATUS_RX_FIFO_UNR (1 << 4)
91 #define SPI_FIFO_STATUS_TX_FIFO_FULL (1 << 3)
92 #define SPI_FIFO_STATUS_TX_FIFO_EMPTY (1 << 2)
93 #define SPI_FIFO_STATUS_RX_FIFO_FULL (1 << 1)
94 #define SPI_FIFO_STATUS_RX_FIFO_EMPTY (1 << 0)
95 
96 /* SPI_DMA_CTL */
97 #define SPI_DMA_CTL_DMA (1 << 31)
98 #define SPI_DMA_CTL_CONT (1 << 30)
99 #define SPI_DMA_CTL_IE_RX (1 << 29)
100 #define SPI_DMA_CTL_IE_TX (1 << 28)
101 #define SPI_DMA_CTL_RX_TRIG_MASK 0x3
102 #define SPI_DMA_CTL_RX_TRIG_SHIFT 19
103 #define SPI_DMA_CTL_TX_TRIG_MASK 0x3
104 #define SPI_DMA_CTL_TX_TRIG_SHIFT 15
105 
106 /* SPI_DMA_BLK */
107 #define SPI_DMA_CTL_BLOCK_SIZE_MASK 0xffff
108 #define SPI_DMA_CTL_BLOCK_SIZE_SHIFT 0
109 
110 static struct tegra_spi_channel tegra_spi_channels[] = {
111  /*
112  * Note: Tegra pinmux must be setup for corresponding SPI channel in
113  * order for its registers to be accessible. If pinmux has not been
114  * set up, access to the channel's registers will simply hang.
115  *
116  * TODO(dhendrix): Clarify or remove this comment (is clock setup
117  * necessary first, or just pinmux, or both?)
118  */
119  {
120  .slave = { .bus = 1, },
121  .regs = (struct tegra_spi_regs *)TEGRA_SPI1_BASE,
122  .req_sel = APBDMA_SLAVE_SL2B1,
123  },
124  {
125  .slave = { .bus = 2, },
126  .regs = (struct tegra_spi_regs *)TEGRA_SPI2_BASE,
127  .req_sel = APBDMA_SLAVE_SL2B2,
128  },
129  {
130  .slave = { .bus = 3, },
131  .regs = (struct tegra_spi_regs *)TEGRA_SPI3_BASE,
132  .req_sel = APBDMA_SLAVE_SL2B3,
133  },
134  {
135  .slave = { .bus = 4, },
136  .regs = (struct tegra_spi_regs *)TEGRA_SPI4_BASE,
137  .req_sel = APBDMA_SLAVE_SL2B4,
138  },
139  {
140  .slave = { .bus = 5, },
141  .regs = (struct tegra_spi_regs *)TEGRA_SPI5_BASE,
142  .req_sel = APBDMA_SLAVE_SL2B5,
143  },
144  {
145  .slave = { .bus = 6, },
146  .regs = (struct tegra_spi_regs *)TEGRA_SPI6_BASE,
147  .req_sel = APBDMA_SLAVE_SL2B6,
148  },
149 };
150 
151 enum spi_direction {
152  SPI_SEND,
153  SPI_RECEIVE
154 };
155 
156 struct tegra_spi_channel *tegra_spi_init(unsigned int bus)
157 {
158  int i;
159  struct tegra_spi_channel *spi = NULL;
160 
161  for (i = 0; i < ARRAY_SIZE(tegra_spi_channels); i++) {
162  if (tegra_spi_channels[i].slave.bus == bus) {
163  spi = &tegra_spi_channels[i];
164  break;
165  }
166  }
167  if (!spi)
168  return NULL;
169 
170  /* software drives chip-select, set value to high */
171  setbits32(&spi->regs->command1,
172  SPI_CMD1_CS_SW_HW | SPI_CMD1_CS_SW_VAL);
173 
174  /* 8-bit transfers, unpacked mode, most significant bit first */
175  clrbits32(&spi->regs->command1,
176  SPI_CMD1_BIT_LEN_MASK | SPI_CMD1_PACKED |
177  SPI_CMD1_LSBY_FE | SPI_CMD1_LSBI_FE);
178 
179  return spi;
180 }
181 
182 static struct tegra_spi_channel * const to_tegra_spi(int bus) {
183  return &tegra_spi_channels[bus - 1];
184 }
185 
186 static unsigned int tegra_spi_speed(unsigned int bus)
187 {
188  /* FIXME: implement this properly, for now use max value (50MHz) */
189  return 50000000;
190 }
191 
192 static int spi_ctrlr_claim_bus(const struct spi_slave *slave)
193 {
194  struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
195  u32 val;
196 
197  tegra_spi_init(slave->bus);
198 
199  val = read32(&regs->command1);
200 
201  /* select appropriate chip-select line */
202  val &= ~(SPI_CMD1_CS_SEL_MASK << SPI_CMD1_CS_SEL_SHIFT);
203  val |= (slave->cs << SPI_CMD1_CS_SEL_SHIFT);
204 
205  /* drive chip-select with the inverse of the "inactive" value */
206  if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
207  val &= ~SPI_CMD1_CS_SW_VAL;
208  else
209  val |= SPI_CMD1_CS_SW_VAL;
210 
211  write32(&regs->command1, val);
212  return 0;
213 }
214 
215 static void spi_ctrlr_release_bus(const struct spi_slave *slave)
216 {
217  struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
218  u32 val;
219 
220  val = read32(&regs->command1);
221 
222  if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
223  val |= SPI_CMD1_CS_SW_VAL;
224  else
225  val &= ~SPI_CMD1_CS_SW_VAL;
226 
227  write32(&regs->command1, val);
228 }
229 
230 static void dump_fifo_status(struct tegra_spi_channel *spi)
231 {
232  u32 status = read32(&spi->regs->fifo_status);
233 
234  printk(BIOS_INFO, "Raw FIFO status: 0x%08x\n", status);
235  if (status & SPI_FIFO_STATUS_TX_FIFO_OVF)
236  printk(BIOS_INFO, "\tTx overflow detected\n");
237  if (status & SPI_FIFO_STATUS_TX_FIFO_UNR)
238  printk(BIOS_INFO, "\tTx underrun detected\n");
239  if (status & SPI_FIFO_STATUS_RX_FIFO_OVF)
240  printk(BIOS_INFO, "\tRx overflow detected\n");
241  if (status & SPI_FIFO_STATUS_RX_FIFO_UNR)
242  printk(BIOS_INFO, "\tRx underrun detected\n");
243 
244  printk(BIOS_INFO, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
245  read32(&spi->regs->tx_fifo), read32(&spi->regs->tx_data));
246  printk(BIOS_INFO, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
247  read32(&spi->regs->rx_fifo), read32(&spi->regs->rx_data));
248 }
249 
250 static void clear_fifo_status(struct tegra_spi_channel *spi)
251 {
252  clrbits32(&spi->regs->fifo_status,
253  SPI_FIFO_STATUS_ERR |
254  SPI_FIFO_STATUS_TX_FIFO_OVF |
255  SPI_FIFO_STATUS_TX_FIFO_UNR |
256  SPI_FIFO_STATUS_RX_FIFO_OVF |
257  SPI_FIFO_STATUS_RX_FIFO_UNR);
258 }
259 
260 static void dump_spi_regs(struct tegra_spi_channel *spi)
261 {
262  printk(BIOS_INFO, "SPI regs:\n"
263  "\tdma_blk: 0x%08x\n"
264  "\tcommand1: 0x%08x\n"
265  "\tdma_ctl: 0x%08x\n"
266  "\ttrans_status: 0x%08x\n",
267  read32(&spi->regs->dma_blk),
268  read32(&spi->regs->command1),
269  read32(&spi->regs->dma_ctl),
270  read32(&spi->regs->trans_status));
271 }
272 
273 static void dump_dma_regs(struct apb_dma_channel *dma)
274 {
275  if (dma == NULL)
276  return;
277 
278  printk(BIOS_INFO, "DMA regs:\n"
279  "\tahb_ptr: 0x%08x\n"
280  "\tapb_ptr: 0x%08x\n"
281  "\tahb_seq: 0x%08x\n"
282  "\tapb_seq: 0x%08x\n"
283  "\tcsr: 0x%08x\n"
284  "\tcsre: 0x%08x\n"
285  "\twcount: 0x%08x\n"
286  "\tdma_byte_sta: 0x%08x\n"
287  "\tword_transfer: 0x%08x\n",
288  read32(&dma->regs->ahb_ptr),
289  read32(&dma->regs->apb_ptr),
290  read32(&dma->regs->ahb_seq),
291  read32(&dma->regs->apb_seq),
292  read32(&dma->regs->csr),
293  read32(&dma->regs->csre),
294  read32(&dma->regs->wcount),
295  read32(&dma->regs->dma_byte_sta),
296  read32(&dma->regs->word_transfer));
297 }
298 
299 static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
300 {
301  /* FIXME: Make this take total packet size into account */
302  return read32(&spi->regs->trans_status) &
303  (SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
304 }
305 
306 /*
307  * This calls udelay() with a calculated value based on the SPI speed and
308  * number of bytes remaining to be transferred. It assumes that if the
309  * calculated delay period is less than MIN_DELAY_US then it is probably
310  * not worth the overhead of yielding.
311  */
312 #define MIN_DELAY_US 250
313 static void spi_delay(struct tegra_spi_channel *spi,
314  unsigned int bytes_remaining)
315 {
316  unsigned int ns_per_byte, delay_us;
317 
318  ns_per_byte = 1000000000 / (tegra_spi_speed(spi->slave.bus) / 8);
319  delay_us = (ns_per_byte * bytes_remaining) / 1000;
320 
321  if (delay_us < MIN_DELAY_US)
322  return;
323 
324  udelay(delay_us);
325 }
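/*
 * Worked example (using the 50MHz placeholder returned by tegra_spi_speed()
 * above, 8 bits per byte): ns_per_byte = 1000000000 / (50000000 / 8) = 160ns.
 * With 2000 bytes remaining the estimate is 160 * 2000 / 1000 = 320us, which
 * exceeds MIN_DELAY_US, so udelay(320) is issued. With 1000 bytes remaining
 * the estimate is only 160us, so the function returns and the caller simply
 * polls again.
 */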
326 
327 static void tegra_spi_wait(struct tegra_spi_channel *spi)
328 {
329  unsigned int count, dma_blk;
330 
331  dma_blk = 1 + (read32(&spi->regs->dma_blk) &
332  (SPI_DMA_CTL_BLOCK_SIZE_MASK << SPI_DMA_CTL_BLOCK_SIZE_SHIFT));
333 
334  while ((count = spi_byte_count(spi)) != dma_blk)
335  spi_delay(spi, dma_blk - count);
336 }
337 
338 static int fifo_error(struct tegra_spi_channel *spi)
339 {
340  return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
341 }
342 
343 static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
344  unsigned int bytes, enum spi_direction dir)
345 {
346  u8 *p = spi->out_buf;
347  unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
348  u32 flush_mask, enable_mask;
349 
350  if (dir == SPI_SEND) {
351  flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH;
352  enable_mask = SPI_CMD1_TX_EN;
353  } else {
354  flush_mask = SPI_FIFO_STATUS_RX_FIFO_FLUSH;
355  enable_mask = SPI_CMD1_RX_EN;
356  }
357 
358  setbits32(&spi->regs->fifo_status, flush_mask);
359  while (read32(&spi->regs->fifo_status) & flush_mask)
360  ;
361 
362  setbits32(&spi->regs->command1, enable_mask);
363 
364  /* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
365  * PIO transfers */
366  write32(&spi->regs->dma_blk, todo - 1);
367 
368  if (dir == SPI_SEND) {
369  unsigned int to_fifo = bytes;
370  while (to_fifo) {
371  write32(&spi->regs->tx_fifo, *p);
372  p++;
373  to_fifo--;
374  }
375  }
376 
377  return todo;
378 }
379 
380 static void tegra_spi_pio_start(struct tegra_spi_channel *spi)
381 {
382  setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);
383  setbits32(&spi->regs->command1, SPI_CMD1_GO);
384  /* Make sure the write to command1 completes. */
385  read32(&spi->regs->command1);
386 }
387 
388 static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
389 {
390  return (read32(&spi->regs->fifo_status) >>
391  SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT) &
392  SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK;
393 }
394 
395 static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
396 {
397  u8 *p = spi->in_buf;
398  struct stopwatch sw;
399 
400  clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);
401 
402  /*
403  * Allow some time in case the Rx FIFO does not yet have
404  * all packets pushed into it. See chrome-os-partner:24215.
405  */
406  stopwatch_init_usecs_expire(&sw, SPI_FIFO_XFER_TIMEOUT_US);
407  do {
408  if (rx_fifo_count(spi) == spi_byte_count(spi))
409  break;
410  } while (!stopwatch_expired(&sw));
411 
412  while (!(read32(&spi->regs->fifo_status) &
413  SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
414  *p = read8(&spi->regs->rx_fifo);
415  p++;
416  }
417 
418  if (fifo_error(spi)) {
419  printk(BIOS_ERR, "%s: ERROR:\n", __func__);
420  dump_spi_regs(spi);
421  dump_fifo_status(spi);
422  return -1;
423  }
424 
425  return 0;
426 }
427 
428 static void setup_dma_params(struct tegra_spi_channel *spi,
429  struct apb_dma_channel *dma)
430 {
431  /* APB bus width = 8-bits, address wrap for each word */
432  clrbits32(&dma->regs->apb_seq,
433  APB_BUS_WIDTH_MASK << APB_BUS_WIDTH_SHIFT);
434  /* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
435  * no address wrapping */
436  clrsetbits32(&dma->regs->ahb_seq,
437  AHB_BURST_MASK << AHB_BURST_SHIFT,
438  4 << AHB_BURST_SHIFT);
439 
440  /* Set ONCE mode to transfer one "block" at a time (64KB) and enable
441  * flow control. */
442  clrbits32(&dma->regs->csr,
443  APB_CSR_REQ_SEL_MASK << APB_CSR_REQ_SEL_SHIFT);
444  setbits32(&dma->regs->csr, APB_CSR_ONCE | APB_CSR_FLOW |
445  (spi->req_sel << APB_CSR_REQ_SEL_SHIFT));
446 }
447 
448 static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
449  unsigned int bytes, enum spi_direction dir)
450 {
451  unsigned int todo, wcount;
452 
453  /*
454  * For DMA we need to think of things in terms of word count.
455  * AHB width is fixed at 32-bits. To avoid overrunning
456  * the in/out buffers we must align down. (Note: lowest 2-bits
457  * in WCOUNT register are ignored, and WCOUNT seems to count
458  * words starting at n-1)
459  *
460  * Example: If "bytes" is 7 and we are transferring 1-byte at a time,
461  * WCOUNT should be 4. The remaining 3 bytes must be transferred
462  * using PIO.
463  */
464  todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_DMA);
465  todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
466  wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);
467 
468  if (dir == SPI_SEND) {
469  spi->dma_out = dma_claim();
470  if (!spi->dma_out)
471  return -1;
472 
473  /* ensure bytes to send will be visible to DMA controller */
474  dcache_clean_by_mva(spi->out_buf, bytes);
475 
476  write32(&spi->dma_out->regs->apb_ptr,
477  (u32)&spi->regs->tx_fifo);
478  write32(&spi->dma_out->regs->ahb_ptr, (u32)spi->out_buf);
479  setbits32(&spi->dma_out->regs->csr, APB_CSR_DIR);
480  setup_dma_params(spi, spi->dma_out);
481  write32(&spi->dma_out->regs->wcount, wcount);
482  } else {
483  spi->dma_in = dma_claim();
484  if (!spi->dma_in)
485  return -1;
486 
487  /* avoid data collisions */
488  dcache_clean_invalidate_by_mva(spi->in_buf, bytes);
489 
490  write32(&spi->dma_in->regs->apb_ptr, (u32)&spi->regs->rx_fifo);
491  write32(&spi->dma_in->regs->ahb_ptr, (u32)spi->in_buf);
492  clrbits32(&spi->dma_in->regs->csr, APB_CSR_DIR);
493  setup_dma_params(spi, spi->dma_in);
494  write32(&spi->dma_in->regs->wcount, wcount);
495  }
496 
497  /* BLOCK_SIZE starts at n-1 */
498  write32(&spi->regs->dma_blk, todo - 1);
499  return todo;
500 }
501 
502 static void tegra_spi_dma_start(struct tegra_spi_channel *spi)
503 {
504  /*
505  * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
506  * (set bit to clear) between each transaction. Otherwise the next
507  * transaction does not start.
508  */
509  setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);
510 
511  if (spi->dma_out)
512  setbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_IE_TX);
513  if (spi->dma_in)
514  setbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_IE_RX);
515 
516  /*
517  * To avoid underrun conditions, enable APB DMA before SPI DMA for
518  * Tx and enable SPI DMA before APB DMA for Rx.
519  */
520  if (spi->dma_out)
521  dma_start(spi->dma_out);
522  setbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
523  if (spi->dma_in)
524  dma_start(spi->dma_in);
525 }
526 
527 static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
528 {
529  int ret;
530  unsigned int todo;
531 
532  if (spi->dma_in) {
533  todo = read32(&spi->dma_in->regs->wcount);
534 
535  while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
536  dma_busy(spi->dma_in))
537  ; /* this shouldn't take long, no udelay */
538  dma_stop(spi->dma_in);
539  clrbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
540  dma_release(spi->dma_in);
541  }
542 
543  if (spi->dma_out) {
544  todo = read32(&spi->dma_out->regs->wcount);
545 
546  while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
547  dma_busy(spi->dma_out)) {
548  spi_delay(spi, todo - spi_byte_count(spi));
549  }
550  clrbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
551  dma_stop(spi->dma_out);
552  dma_release(spi->dma_out);
553  }
554 
555  if (fifo_error(spi)) {
556  printk(BIOS_ERR, "%s: ERROR:\n", __func__);
557  dump_dma_regs(spi->dma_out);
558  dump_dma_regs(spi->dma_in);
559  dump_spi_regs(spi);
560  dump_fifo_status(spi);
561  ret = -1;
562  goto done;
563  }
564 
565  ret = 0;
566 done:
567  spi->dma_in = NULL;
568  spi->dma_out = NULL;
569  return ret;
570 }
571 
572 /*
573  * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
574  * sets transfer mode used by this channel (if not set already).
575  *
576  * A few caveats to watch out for:
577  * - The number of bytes which can be transferred may be smaller than the
578  * number of bytes the caller specifies. The number of bytes ready for
579  * a transfer will be returned (unless an error occurs).
580  *
581  * - Only one mode can be used for both RX and TX. The transfer mode of the
582  * SPI channel (spi->xfer_mode) is checked each time this function is called.
583  * If conflicting modes are detected, spi->xfer_mode will be set to
584  * XFER_MODE_NONE and an error will be returned.
585  *
586  * Returns bytes ready for transfer if successful, <0 to indicate error.
587  */
588 static int xfer_setup(struct tegra_spi_channel *spi, void *buf,
589  unsigned int bytes, enum spi_direction dir)
590 {
591  unsigned int line_size = dcache_line_bytes();
592  unsigned int align;
593  int ret = -1;
594 
595  if (!bytes)
596  return 0;
597 
598  if (dir == SPI_SEND)
599  spi->out_buf = buf;
600  else if (dir == SPI_RECEIVE)
601  spi->in_buf = buf;
602 
603  /*
604  * Alignment considerations:
605  * When we enable caching we'll need to clean/invalidate portions of
606  * memory. So we need to be careful about memory alignment. Also, DMA
607  * likes to operate on 4-bytes at a time on the AHB side. So for
608  * example, if we only want to receive 1 byte, 4 bytes will be
609  * written in memory even if those extra 3 bytes are beyond the length
610  * we want.
611  *
612  * For now we'll use PIO to send/receive unaligned bytes. We may
613  * consider setting aside some space for a kind of bounce buffer to
614  * stay in DMA mode once we have a chance to benchmark the two
615  * approaches.
616  */
617 
618  if (bytes < line_size) {
619  if (spi->xfer_mode == XFER_MODE_DMA) {
620  spi->xfer_mode = XFER_MODE_NONE;
621  ret = -1;
622  } else {
623  spi->xfer_mode = XFER_MODE_PIO;
624  ret = tegra_spi_pio_prepare(spi, bytes, dir);
625  }
626  goto done;
627  }
628 
629  /* transfer bytes before the aligned boundary */
630  align = line_size - ((uintptr_t)buf % line_size);
631  if ((align != 0) && (align != line_size)) {
632  if (spi->xfer_mode == XFER_MODE_DMA) {
633  spi->xfer_mode = XFER_MODE_NONE;
634  ret = -1;
635  } else {
636  spi->xfer_mode = XFER_MODE_PIO;
637  ret = tegra_spi_pio_prepare(spi, align, dir);
638  }
639  goto done;
640  }
641 
642  /* do aligned DMA transfer */
643  align = (((uintptr_t)buf + bytes) % line_size);
644  if (bytes - align > 0) {
645  unsigned int dma_bytes = bytes - align;
646 
647  if (spi->xfer_mode == XFER_MODE_PIO) {
648  spi->xfer_mode = XFER_MODE_NONE;
649  ret = -1;
650  } else {
651  spi->xfer_mode = XFER_MODE_DMA;
652  ret = tegra_spi_dma_prepare(spi, dma_bytes, dir);
653  }
654 
655  goto done;
656  }
657 
658  /* transfer any remaining unaligned bytes */
659  if (align) {
660  if (spi->xfer_mode == XFER_MODE_DMA) {
661  spi->xfer_mode = XFER_MODE_NONE;
662  ret = -1;
663  } else {
664  spi->xfer_mode = XFER_MODE_PIO;
665  ret = tegra_spi_pio_prepare(spi, align, dir);
666  }
667  goto done;
668  }
669 
670 done:
671  return ret;
672 }
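/*
 * Illustration of the resulting split (assuming 64-byte cache lines and a
 * 200-byte buffer that starts one byte past a line boundary): the loop in
 * spi_ctrlr_xfer() below calls xfer_setup() repeatedly, so the first call
 * prepares a 63-byte PIO transfer up to the aligned boundary, the second
 * call prepares a 128-byte DMA transfer of the aligned middle, and the
 * third call prepares a 9-byte PIO transfer for the unaligned tail
 * (63 + 128 + 9 = 200).
 */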
673 
674 static void xfer_start(struct tegra_spi_channel *spi)
675 {
676  if (spi->xfer_mode == XFER_MODE_DMA)
677  tegra_spi_dma_start(spi);
678  else
679  tegra_spi_pio_start(spi);
680 }
681 
682 static void xfer_wait(struct tegra_spi_channel *spi)
683 {
684  tegra_spi_wait(spi);
685 }
686 
687 static int xfer_finish(struct tegra_spi_channel *spi)
688 {
689  int ret;
690 
691  if (spi->xfer_mode == XFER_MODE_DMA)
692  ret = tegra_spi_dma_finish(spi);
693  else
694  ret = tegra_spi_pio_finish(spi);
695 
696  spi->xfer_mode = XFER_MODE_NONE;
697  return ret;
698 }
699 
700 static int spi_ctrlr_xfer(const struct spi_slave *slave, const void *dout,
701  size_t out_bytes, void *din, size_t in_bytes)
702 {
703  struct tegra_spi_channel *spi = to_tegra_spi(slave->bus);
704  u8 *out_buf = (u8 *)dout;
705  u8 *in_buf = (u8 *)din;
706  size_t todo;
707  int ret = 0;
708 
709  /* tegra bus numbers start at 1 */
710  ASSERT(slave->bus > 0 && slave->bus <= ARRAY_SIZE(tegra_spi_channels));
711 
712  while (out_bytes || in_bytes) {
713  int x = 0;
714 
715  if (out_bytes == 0)
716  todo = in_bytes;
717  else if (in_bytes == 0)
718  todo = out_bytes;
719  else
720  todo = MIN(out_bytes, in_bytes);
721 
722  if (out_bytes) {
723  x = xfer_setup(spi, out_buf, todo, SPI_SEND);
724  if (x < 0) {
725  if (spi->xfer_mode == XFER_MODE_NONE) {
726  spi->xfer_mode = XFER_MODE_PIO;
727  continue;
728  } else {
729  ret = -1;
730  break;
731  }
732  }
733  }
734  if (in_bytes) {
735  x = xfer_setup(spi, in_buf, todo, SPI_RECEIVE);
736  if (x < 0) {
737  if (spi->xfer_mode == XFER_MODE_NONE) {
738  spi->xfer_mode = XFER_MODE_PIO;
739  continue;
740  } else {
741  ret = -1;
742  break;
743  }
744  }
745  }
746 
747  /*
748  * Note: Some devices (such as Chrome EC) are sensitive to
749  * delays, so be careful when adding debug prints not to
750  * cause timeouts between transfers.
751  */
752  xfer_start(spi);
753  xfer_wait(spi);
754  if (xfer_finish(spi)) {
755  ret = -1;
756  break;
757  }
758 
759  /* Post-processing. */
760  if (out_bytes) {
761  out_bytes -= x;
762  out_buf += x;
763  }
764  if (in_bytes) {
765  in_bytes -= x;
766  in_buf += x;
767  }
768  }
769 
770  if (ret < 0) {
771  printk(BIOS_ERR, "%s: Error detected\n", __func__);
772  printk(BIOS_ERR, "Transaction size: %zu, bytes remaining: "
773  "%zu out / %zu in\n", todo, out_bytes, in_bytes);
774  clear_fifo_status(spi);
775  }
776  return ret;
777 }
778 
779 static const struct spi_ctrlr spi_ctrlr = {
780  .claim_bus = spi_ctrlr_claim_bus,
781  .release_bus = spi_ctrlr_release_bus,
782  .xfer = spi_ctrlr_xfer,
783  .max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
784 };
785 
786 const struct spi_ctrlr_buses spi_ctrlr_bus_map[] = {
787  {
788  .ctrlr = &spi_ctrlr,
789  .bus_start = 1,
790  .bus_end = ARRAY_SIZE(tegra_spi_channels)
791  },
792 };
793 
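For reference, a minimal, hypothetical sketch of how code elsewhere in coreboot reaches this controller through the generic SPI API in spi-generic.h. The bus number (4), chip-select (0) and the 0x9f opcode are made-up illustration values; real callers use whatever their board and device require. Bus numbers start at 1, matching spi_ctrlr_bus_map above.

#include <spi-generic.h>
#include <types.h>

static int example_probe(void)
{
	struct spi_slave slave;
	u8 cmd = 0x9f;	/* hypothetical device opcode */
	u8 id[3];

	/* Resolve bus 4 / CS 0 against spi_ctrlr_bus_map. */
	if (spi_setup_slave(4, 0, &slave))
		return -1;

	if (spi_claim_bus(&slave))	/* asserts CS via spi_ctrlr_claim_bus() */
		return -1;

	spi_xfer(&slave, &cmd, sizeof(cmd), NULL, 0);	/* send; PIO or DMA chosen in xfer_setup() */
	spi_xfer(&slave, NULL, 0, id, sizeof(id));	/* receive */

	spi_release_bus(&slave);	/* deasserts CS via spi_ctrlr_release_bus() */
	return 0;
}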