diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c
--- a/sys/dev/mmc/host/dwmmc.c
+++ b/sys/dev/mmc/host/dwmmc.c
@@ -111,7 +111,8 @@
 #define DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
 #define DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
-				|SDMMC_INTMASK_RE)
+				|SDMMC_INTMASK_RE | SDMMC_INTMASK_FRUN \
+				|SDMMC_INTMASK_HTO)
 #define DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)
@@ -124,6 +125,8 @@
 #define DES0_OWN	(1 << 31)	/* OWN */

 #define DES1_BS1_MASK	0x1fff
+#define DES1_BS2_MASK	0x1fff
+#define DES1_BS2_SHIFT	13

 struct idmac_desc {
	uint32_t	des0;	/* control */
@@ -136,20 +139,23 @@
 #define IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
 #define DEF_MSIZE	0x2	/* Burst size of multiple transaction */
 /*
- * Size field in DMA descriptor is 13 bits long (up to 4095 bytes),
+ * Size field in DMA descriptor is 13 bits long (up to 8191 bytes),
  * but must be a multiple of the data bus size.Additionally, we must ensure
  * that bus_dmamap_load() doesn't additionally fragments buffer (because it
  * is processed with page size granularity). Thus limit fragment size to half
  * of page.
- * XXX switch descriptor format to array and use second buffer pointer for
- * second half of page
  */
 #define IDMAC_MAX_SIZE	2048
+/*
+ * In the dual-buffer descriptor structure we have two buffers per descriptor.
+ */
+#define IDMAC_BUF_SEGS	(2 * IDMAC_DESC_SEGS)
+
 /*
  * Busdma may bounce buffers, so we must reserve 2 descriptors
  * (on start and on end) for bounced fragments.
  */
-#define DWMMC_MAX_DATA	(IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE
+#define DWMMC_MAX_DATA	(IDMAC_MAX_SIZE * (IDMAC_BUF_SEGS - 2)) / MMC_SECTOR_SIZE

 static void dwmmc_next_operation(struct dwmmc_softc *);
 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
@@ -181,30 +187,50 @@
 }

 static void
-dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+dwmmc_desc_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 {
-	struct dwmmc_softc *sc;
-	int idx;
+	struct dwmmc_softc *sc = (struct dwmmc_softc *)arg;
+	int idx = 0;

-	sc = arg;
-	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
	if (error != 0)
		panic("%s: error != 0 (%d)\n", __func__, error);

-	for (idx = 0; idx < nsegs; idx++) {
-		sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
-		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
-		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
-
-		if (idx == 0)
-			sc->desc_ring[idx].des0 |= DES0_FS;
+	for (idx = 0; idx < nsegs / 2; idx++) {
+		bool first_seg = idx == 0;
+		bool last_seg = nsegs % 2 == 0 && idx == nsegs / 2 - 1;
+
+		uint32_t len1 = segs[2 * idx].ds_len & DES1_BS1_MASK;
+		uint32_t len2 = segs[2 * idx + 1].ds_len & DES1_BS2_MASK;
+
+		/*
+		 * Set both the DES0_ER and DES0_LD flags when we hit the
+		 * last descriptor, or DES0_DIC in any other case.
+		 */
+		sc->desc_ring[idx].des0 = (first_seg ? DES0_FS : 0) |
+		    (last_seg ? DES0_ER | DES0_LD : DES0_DIC);
+		sc->desc_ring[idx].des1 = len1 | (len2 << DES1_BS2_SHIFT);
+		sc->desc_ring[idx].des2 = segs[2 * idx].ds_addr;
+		sc->desc_ring[idx].des3 = segs[2 * idx + 1].ds_addr;
+		wmb();
+		sc->desc_ring[idx].des0 |= DES0_OWN;
+		wmb();
+	}

-		if (idx == (nsegs - 1)) {
-			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
-			sc->desc_ring[idx].des0 |= DES0_LD;
-		}
+	if (nsegs % 2 == 1) {
+		/*
+		 * One odd segment is left over.  It always goes into the
+		 * last descriptor; mark that descriptor as the first one
+		 * as well if the transfer consists of a single segment.
+		 */
+		bool first_seg = idx == 0;
+
+		sc->desc_ring[idx].des0 = DES0_ER | DES0_LD |
+		    (first_seg ? DES0_FS : 0);
+		sc->desc_ring[idx].des1 = segs[2 * idx].ds_len & DES1_BS1_MASK;
+		sc->desc_ring[idx].des2 = segs[2 * idx].ds_addr;
+		sc->desc_ring[idx].des3 = 0;
		wmb();
		sc->desc_ring[idx].des0 |= DES0_OWN;
+		wmb();
	}
 }

@@ -234,8 +260,6 @@
 dma_setup(struct dwmmc_softc *sc)
 {
	int error;
-	int nidx;
-	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
@@ -275,24 +299,14 @@
		return (1);
	}

-	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
-		sc->desc_ring[idx].des0 = DES0_CH;
-		sc->desc_ring[idx].des1 = 0;
-		nidx = (idx + 1) % IDMAC_DESC_SEGS;
-		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
-		    (nidx * sizeof(struct idmac_desc));
-	}
-	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
-	sc->desc_ring[idx - 1].des0 |= DES0_ER;
-
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    8, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
-	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
-	    IDMAC_DESC_SEGS,		/* nsegments */
+	    IDMAC_MAX_SIZE * IDMAC_BUF_SEGS,	/* maxsize */
+	    IDMAC_BUF_SEGS,		/* nsegments */
	    IDMAC_MAX_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
@@ -964,7 +978,7 @@
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
-	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
+	reg &= ~(SDMMC_BMOD_DE);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

@@ -985,8 +999,8 @@
	WRITE4(sc, SDMMC_INTMASK, reg);

	dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
-	    data->data, data->len, dwmmc_ring_setup,
-	    sc, BUS_DMA_NOWAIT);
+	    data->data, data->len, dwmmc_desc_setup, sc,
+	    BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

@@ -1015,6 +1029,7 @@
	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
+	reg &= ~(SDMMC_BMOD_DSL_MASK);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */

diff --git a/sys/dev/mmc/host/dwmmc_reg.h b/sys/dev/mmc/host/dwmmc_reg.h
--- a/sys/dev/mmc/host/dwmmc_reg.h
+++ b/sys/dev/mmc/host/dwmmc_reg.h
@@ -113,6 +113,7 @@
 #define SDMMC_RST_N		0x78	/* Hardware Reset Register */
 #define SDMMC_BMOD		0x80	/* Bus Mode Register */
 #define  SDMMC_BMOD_DE		(1 << 7)	/* IDMAC Enable */
+#define  SDMMC_BMOD_DSL_MASK	0x7C	/* Descriptor Skip Length mask */
 #define  SDMMC_BMOD_FB		(1 << 1)	/* AHB Master Fixed Burst */
 #define  SDMMC_BMOD_SWR		(1 << 0)	/* Reset DMA */
 #define SDMMC_PLDMND		0x84	/* Poll Demand Register */
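
For readers unfamiliar with the dual-buffer (non-chained) IDMAC descriptor layout this patch switches to, the standalone sketch below shows how one pair of busdma segments maps onto a single descriptor: both buffer sizes are packed into des1 (buffer 1 size in bits 12:0, buffer 2 size in bits 25:13) and the two buffer addresses go into des2 and des3. It is only an illustration, not driver code; the segment lengths and addresses are hypothetical, and the DES1_* values mirror the macros added above.

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the descriptor field macros added in the patch. */
#define DES1_BS1_MASK	0x1fff
#define DES1_BS2_MASK	0x1fff
#define DES1_BS2_SHIFT	13

/* Simplified stand-in for struct idmac_desc in dual-buffer mode. */
struct desc {
	uint32_t des0;	/* control/status flags */
	uint32_t des1;	/* buffer 1 and buffer 2 sizes */
	uint32_t des2;	/* buffer 1 physical address */
	uint32_t des3;	/* buffer 2 physical address */
};

int
main(void)
{
	/* Hypothetical segments, each length at most IDMAC_MAX_SIZE (2048). */
	uint32_t len1 = 2048, addr1 = 0x80100000;
	uint32_t len2 = 1536, addr2 = 0x80100800;
	struct desc d = { 0, 0, 0, 0 };

	/* Pack both buffer sizes into des1, addresses into des2/des3. */
	d.des1 = (len1 & DES1_BS1_MASK) |
	    ((len2 & DES1_BS2_MASK) << DES1_BS2_SHIFT);
	d.des2 = addr1;
	d.des3 = addr2;

	printf("des1=0x%08x BS1=%u BS2=%u\n", d.des1,
	    d.des1 & DES1_BS1_MASK,
	    (d.des1 >> DES1_BS2_SHIFT) & DES1_BS2_MASK);
	return (0);
}

With IDMAC_MAX_SIZE kept at 2048 and both buffers of a descriptor in use, each descriptor can cover up to 4 KiB, which is why IDMAC_BUF_SEGS doubles the number of addressable segments (and thus DWMMC_MAX_DATA) without growing the descriptor ring itself.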