From: Peter Crosthwaite
Subject: [Qemu-devel] [PATCH arm-devs v1 15/15] xilinx_spips: lqspi: Fix byte/misaligned access
Date: Wed, 3 Apr 2013 14:33:07 +1000

The LQSPI bus attachment did not support byte/halfword or misaligned
accesses. Fixed. Refactored the LQSPI cache to be byte-wise instead of
word-wise accordingly.

Signed-off-by: Peter Crosthwaite <address@hidden>
---
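A side note for reviewers, not part of the patch: with a byte-wise cache
buffer, a sub-word or misaligned read inside the cached window reduces to
assembling the requested bytes little-endian from an arbitrary offset. A
minimal standalone sketch of that idea (illustrative names only, not QEMU
code):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Assemble a little-endian value of 1, 2 or 4 bytes from a byte-wise
 * cache at any offset; neither the size nor the offset needs to be
 * word aligned. */
static uint32_t cache_read(const uint8_t *buf, size_t offset, unsigned size)
{
    uint32_t ret = 0;
    unsigned i;

    for (i = 0; i < size; ++i) {
        ret |= (uint32_t)buf[offset + i] << (8 * i);
    }
    return ret;
}

int main(void)
{
    uint8_t cache[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

    /* Misaligned halfword read at offset 3 yields 0x5544. */
    printf("%04x\n", cache_read(cache, 3, 2));
    return 0;
}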

 hw/xilinx_spips.c |   31 +++++++++++++++++--------------
 1 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/hw/xilinx_spips.c b/hw/xilinx_spips.c
index 32d8db8..cb45a9c 100644
--- a/hw/xilinx_spips.c
+++ b/hw/xilinx_spips.c
@@ -160,7 +160,7 @@ typedef struct {
 typedef struct {
     XilinxSPIPS parent_obj;
 
-    uint32_t lqspi_buf[LQSPI_CACHE_SIZE];
+    uint8_t lqspi_buf[LQSPI_CACHE_SIZE];
     hwaddr lqspi_cached_addr;
 } XilinxQSPIPS;
 
@@ -366,14 +366,12 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
     }
 }
 
-static inline void rx_data_bytes(XilinxSPIPS *s, uint32_t *value, int max)
+static inline void rx_data_bytes(XilinxSPIPS *s, uint8_t *value, int max)
 {
     int i;
 
-    *value = 0;
     for (i = 0; i < max && !fifo8_is_empty(&s->rx_fifo); ++i) {
-        uint32_t next = fifo8_pop(&s->rx_fifo) & 0xFF;
-        *value |= next << 8 * (s->regs[R_CONFIG] & ENDIAN ? 3-i : i);
+        value[i] = fifo8_pop(&s->rx_fifo);
     }
 }
 
@@ -383,6 +381,7 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
     XilinxSPIPS *s = opaque;
     uint32_t mask = ~0;
     uint32_t ret;
+    uint8_t rx_buf[4];
 
     addr >>= 2;
     switch (addr) {
@@ -412,7 +411,10 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
         mask = 0;
         break;
     case R_RX_DATA:
-        rx_data_bytes(s, &ret, s->num_txrx_bytes);
+        memset(rx_buf, 0, sizeof(rx_buf));
+        rx_data_bytes(s, rx_buf, s->num_txrx_bytes);
+        ret = s->regs[R_CONFIG] & ENDIAN ? cpu_to_be32(*(uint32_t *)rx_buf)
+                        : cpu_to_le32(*(uint32_t *)rx_buf);
         DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret);
         xilinx_spips_update_ixr(s);
         return ret;
@@ -525,7 +527,8 @@ lqspi_read(void *opaque, hwaddr addr, unsigned int size)
 
     if (addr >= q->lqspi_cached_addr &&
             addr <= q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) {
-        ret = q->lqspi_buf[(addr - q->lqspi_cached_addr) >> 2];
+        uint8_t *retp = &q->lqspi_buf[addr - q->lqspi_cached_addr];
+        ret = cpu_to_le32(*(uint32_t *)retp);
         DB_PRINT_L(1, "addr: %08x, data: %08x\n", (unsigned)addr,
                    (unsigned)ret);
         return ret;
@@ -571,13 +574,13 @@ lqspi_read(void *opaque, hwaddr addr, unsigned int size)
 
         DB_PRINT_L(0, "starting QSPI data read\n");
 
-        while (cache_entry < LQSPI_CACHE_SIZE / 4) {
-            for (i = 0; i < 16; ++i) {
-                tx_data_bytes(s, 0, 4);
+        while (cache_entry < LQSPI_CACHE_SIZE) {
+            for (i = 0; i < 64; ++i) {
+                tx_data_bytes(s, 0, 1);
             }
             xilinx_spips_flush_txfifo(s);
-            for (i = 0; i < 16; ++i) {
-                rx_data_bytes(s, &q->lqspi_buf[cache_entry++], 4);
+            for (i = 0; i < 64; ++i) {
+                rx_data_bytes(s, &q->lqspi_buf[cache_entry++], 1);
             }
         }
 
@@ -586,7 +589,7 @@ lqspi_read(void *opaque, hwaddr addr, unsigned int size)
         s->regs[R_CONFIG] = r_config_save;
         xilinx_spips_update_cs_lines(s);
 
-        q->lqspi_cached_addr = addr;
+        q->lqspi_cached_addr = flash_addr * num_effective_busses(s);
         return lqspi_read(opaque, addr, size);
     }
 }
@@ -595,7 +598,7 @@ static const MemoryRegionOps lqspi_ops = {
     .read = lqspi_read,
     .endianness = DEVICE_NATIVE_ENDIAN,
     .valid = {
-        .min_access_size = 4,
+        .min_access_size = 1,
         .max_access_size = 4
     }
 };
-- 
1.7.0.4