memory: rockchip: add dsmc driver

Change-Id: Ie3a7dbe89b34421d476b91a6021c3e81b10b591f
Signed-off-by: Zhihuan He <huan.he@rock-chips.com>
This commit is contained in:
Zhihuan He 2024-05-22 17:28:28 +08:00
commit 7f06ef1c10
9 changed files with 2605 additions and 0 deletions

View file

@ -236,6 +236,7 @@ config STM32_FMC2_EBI
devices (like SRAM, ethernet adapters, FPGAs, LCD displays, ...) on
SOCs containing the FMC2 External Bus Interface.
source "drivers/memory/rockchip/Kconfig"
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"

View file

@ -24,6 +24,7 @@ obj-$(CONFIG_MTK_SMI) += mtk-smi.o
obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_PL353_SMC) += pl353-smc.o
obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o
obj-$(CONFIG_ROCKCHIP_DSMC) += rockchip/
obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/

View file

@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
#
# Rockchip DSMC (Double Data Rate Serial Memory Controller) support
#
config ROCKCHIP_DSMC
tristate "Rockchip DSMC(Double Data Rate Serial Memory Controller) driver"
depends on ARCH_ROCKCHIP
help
Say Y here to enable the Rockchip DSMC (Double Data Rate Serial Memory Controller) driver.

View file

@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Rockchip DSMC drivers.
#
obj-$(CONFIG_ROCKCHIP_DSMC) += dsmc.o
dsmc-y += dsmc-controller.o dsmc-lb-device.o dsmc-host.o

View file

@ -0,0 +1,851 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2024 Rockchip Electronics Co., Ltd.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include "dsmc-host.h"
#include "dsmc-lb-slave.h"
#define MHZ (1000000)

/* Read-modify-write a controller register: clear @clrbits, then set @setbits */
#define REG_CLRSETBITS(dsmc, offset, clrbits, setbits) \
	dsmc_modify_reg(dsmc, offset, clrbits, setbits)
/* psram id: manufacturer IDs reported in the device identification register */
enum {
	CYPRESS = 0x1,
	ISSI = 0x3,
	WINBOND = 0x6,
	APM_PSRAM = 0xd,
};
/* Per-vendor PSRAM description: ID, bus protocol and raw MTR timing word */
struct dsmc_psram {
	uint16_t id;		/* manufacturer ID (see enum above) */
	uint16_t protcl;	/* OPI_XCCELA_PSRAM or HYPERBUS_PSRAM */
	uint32_t mtr_timing;	/* value programmed into DSMC_MTR during probe */
};

/* DSMC psram support list */
static const struct dsmc_psram psram_info[] = {
	/* Only APM is Xccela psram, others are Hyper psram */
	{APM_PSRAM, OPI_XCCELA_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 0, 0)},
	{WINBOND, HYPERBUS_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 2, 2)},
	{CYPRESS, HYPERBUS_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 1, 1)},
	{ISSI, HYPERBUS_PSRAM, MTR_CFG(2, 2, 0, 0, 0, 0, 1, 1)},
};
/*
 * Write Xccela mode register @mr_num through the CR window.
 * The helper applies XCCELA_PSRAM_MR()/XCCELA_PSRAM_MR_SET() itself,
 * so callers pass the raw MR number and 8-bit value.
 */
static inline void xccela_write_mr(struct dsmc_map *map,
				   uint32_t mr_num, uint8_t val)
{
	writew(XCCELA_PSRAM_MR_SET(val), map->virt + XCCELA_PSRAM_MR(mr_num));
}

/* Read Xccela mode register @mr_num (8-bit payload) through the CR window */
static inline uint8_t xccela_read_mr(struct dsmc_map *map, uint32_t mr_num)
{
	return XCCELA_PSRAM_MR_GET(readw(map->virt +
					 XCCELA_PSRAM_MR(mr_num)));
}
/* Write a 16-bit HyperBus configuration register at byte offset @mr_num */
static inline void hyper_write_mr(struct dsmc_map *map,
				  uint32_t mr_num, uint16_t val)
{
	writew(val, map->virt + mr_num);
}

/* Read a 16-bit HyperBus identification/configuration register */
static inline uint16_t hyper_read_mr(struct dsmc_map *map, uint32_t mr_num)
{
	return readw(map->virt + mr_num);
}
/* Write a 32-bit local-bus slave common register at offset @cmn_reg */
static inline void lb_write_cmn(struct dsmc_map *map,
				uint32_t cmn_reg, uint32_t val)
{
	writel(val, map->virt + cmn_reg);
}

/* Read a 32-bit local-bus slave common register at offset @cmn_reg */
static inline uint32_t lb_read_cmn(struct dsmc_map *map, uint32_t cmn_reg)
{
	return readl(map->virt + cmn_reg);
}
/*
 * Read-modify-write a DSMC controller register: clear @clrbits, then
 * set @setbits. Backs the REG_CLRSETBITS() macro used throughout.
 */
static inline void dsmc_modify_reg(struct rockchip_dsmc *dsmc, uint32_t offset,
				   uint32_t clrbits, uint32_t setbits)
{
	uint32_t value;

	value = readl(dsmc->regs + offset);
	value &= ~clrbits;
	value |= setbits;
	writel(value, dsmc->regs + offset);
}
/*
 * Return the index of the first slave region in @cfg whose attribute
 * equals @attribute, or -1 when no region matches.
 */
static int find_attr_region(struct dsmc_config_cs *cfg, uint32_t attribute)
{
	int idx = 0;

	while (idx < DSMC_LB_MAX_RGN) {
		if (cfg->slv_rgn[idx].attribute == attribute)
			return idx;
		idx++;
	}

	return -1;
}
/*
 * Convert a capacity in bytes to the index of its highest set bit
 * (i.e. log2 of the capacity for power-of-two sizes). Returns 0 when
 * @cap is zero, matching the original behavior.
 */
static uint32_t cap_2_dev_size(uint32_t cap)
{
	int bit;

	for (bit = 31; bit >= 0; bit--) {
		if (cap & (1U << bit))
			return bit;
	}

	return 0;
}
/*
 * Probe the PSRAM device ID on chip select @cs.
 *
 * Temporarily switches the controller to x8 CR (register) space,
 * issues the protocol-specific ID read, then restores the saved MCR
 * value so memory-space accesses resume.
 *
 * Returns 0 when the device ID matches the protocol currently
 * programmed into dsmc->cfg.cs_cfg[cs], -1 otherwise.
 */
static int dsmc_psram_id_detect(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t tmp, i;
	int ret = -1;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	tmp = readl(dsmc->regs + DSMC_MCR(cs));
	/* config to CR space */
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) |
		       (MCR_CRT_MASK << MCR_CRT_SHIFT),
		       (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) |
		       (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT));
	if (cfg->protcl == OPI_XCCELA_PSRAM) {
		uint8_t mid;

		/* reset AP memory psram */
		REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
			       (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT),
			       (0x1 << VDMC_RESET_CMD_MODE_SHIFT));
		/* write mr any value to trigger xccela psram reset */
		xccela_write_mr(region_map, 0, 0x0);
		udelay(200);
		REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
			       (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT),
			       (0x0 << VDMC_RESET_CMD_MODE_SHIFT));
		mid = xccela_read_mr(region_map, 1);
		mid &= XCCELA_DEV_ID_MASK;
		if (mid == APM_PSRAM)
			ret = 0;
	} else {
		/* hyper psram get ID */
		uint16_t mid;

		mid = hyper_read_mr(region_map, HYPER_PSRAM_IR0);
		mid &= HYPERBUS_DEV_ID_MASK;
		/* psram_info[0] is the Xccela entry, so start at index 1 */
		for (i = 1; i < ARRAY_SIZE(psram_info); i++) {
			if (mid == psram_info[i].id) {
				ret = 0;
				break;
			}
		}
	}
	/* config to memory space */
	writel(tmp, dsmc->regs + DSMC_MCR(cs));

	return ret;
}
/*
 * Detect the column count and I/O width of the PSRAM on chip select
 * @cs and store them in dsmc->cfg.cs_cfg[cs].
 *
 * Xccela parts use a fixed 10-bit column and derive the width from
 * the configured total capacity; HyperBus parts report both through
 * their IR0/IR1 identification registers (read via the CR window).
 */
static void dsmc_psram_bw_detect(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t tmp, col;
	uint16_t ir0_ir1;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	if (cfg->protcl == OPI_XCCELA_PSRAM) {
		col = 10;
		if (dsmc->cfg.cap >= PSRAM_SIZE_16MBYTE)
			cfg->io_width = MCR_IOWIDTH_X16;
		else
			cfg->io_width = MCR_IOWIDTH_X8;
	} else {
		tmp = readl(dsmc->regs + DSMC_MCR(cs));
		/* config to CR space */
		REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
			       (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) |
			       (MCR_CRT_MASK << MCR_CRT_SHIFT),
			       (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) |
			       (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT));
		/* hyper psram get IR0 */
		ir0_ir1 = hyper_read_mr(region_map, HYPER_PSRAM_IR0);
		col = ((ir0_ir1 >> IR0_COL_COUNT_SHIFT) & IR0_COL_COUNT_MASK) + 1;
		ir0_ir1 = hyper_read_mr(region_map, HYPER_PSRAM_IR1);
		if ((ir0_ir1 & IR1_DEV_IO_WIDTH_MASK) == IR1_DEV_IO_WIDTH_X16)
			cfg->io_width = MCR_IOWIDTH_X16;
		else
			cfg->io_width = MCR_IOWIDTH_X8;
		/* config to memory space */
		writel(tmp, dsmc->regs + DSMC_MCR(cs));
	}
	cfg->col = col;
}
/*
 * Identify which supported PSRAM is populated on chip select @cs.
 *
 * Tries every entry of psram_info[]: programs that entry's vendor ID,
 * protocol and MTR timing into the controller, then asks the device
 * for its ID via dsmc_psram_id_detect(). On a match the column count
 * and I/O width are detected as well. AXI read error responses are
 * suppressed for the duration of the probe so a missing device cannot
 * fault the bus.
 *
 * Returns 0 on success, -1 when no known device answers.
 */
static int dsmc_psram_dectect(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t i = 0;
	int ret = -1;
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	/* axi read do not response error */
	REG_CLRSETBITS(dsmc, DSMC_AXICTL,
		       (AXICTL_RD_NO_ERR_MASK << AXICTL_RD_NO_ERR_SHIFT),
		       (0x1 << AXICTL_RD_NO_ERR_SHIFT));
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_DEVTYPE_MASK << MCR_DEVTYPE_SHIFT),
		       (MCR_DEVTYPE_HYPERRAM << MCR_DEVTYPE_SHIFT));
	for (i = 0; i < ARRAY_SIZE(psram_info); i++) {
		REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
			       (VDMC_MID_MASK << VDMC_MID_SHIFT) |
			       (VDMC_PROTOCOL_MASK << VDMC_PROTOCOL_SHIFT),
			       (psram_info[i].id << VDMC_MID_SHIFT) |
			       (psram_info[i].protcl << VDMC_PROTOCOL_SHIFT));
		writel(psram_info[i].mtr_timing,
		       dsmc->regs + DSMC_MTR(cs));
		cfg->mid = psram_info[i].id;
		cfg->protcl = psram_info[i].protcl;
		cfg->mtr_timing = psram_info[i].mtr_timing;
		if (!dsmc_psram_id_detect(dsmc, cs)) {
			pr_info("DSMC: The cs%d %s PSRAM ID: 0x%x\n", cs,
				(cfg->protcl == OPI_XCCELA_PSRAM) ? "XCCELA" : "HYPER",
				psram_info[i].id);
			ret = 0;
			break;
		}
	}
	if (i == ARRAY_SIZE(psram_info)) {
		pr_err("DSMC: Unknown PSRAM device\n");
		ret = -1;
	} else {
		dsmc_psram_bw_detect(dsmc, cs);
	}
	/* recovery axi read response */
	REG_CLRSETBITS(dsmc, DSMC_AXICTL,
		       (AXICTL_RD_NO_ERR_MASK << AXICTL_RD_NO_ERR_SHIFT),
		       (0x0 << AXICTL_RD_NO_ERR_SHIFT));

	return ret;
}
/*
 * Encode a latency cycle count into the MTR RLTCY/WLTCY field value:
 * latencies 5..10 map to 0..5, everything else to latency + 0xb.
 */
static uint32_t calc_ltcy_value(uint32_t latency)
{
	if (latency < 5 || latency > 10)
		return latency + 0xb;

	return latency - 5;
}
/*
 * Program the DSMC controller side for a local-bus slave on @cs:
 * clock mode, MTR timing with the encoded read/write latencies, the
 * region divider and each enabled region's attribute register.
 * Finishes by clearing and enabling the interrupt for this cs.
 *
 * Always returns 0.
 */
static int dsmc_ctrller_cfg_for_lb(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t value = 0, i;
	struct regions_config *slv_rgn;
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	writel(dsmc->cfg.clk_mode, dsmc->regs + DSMC_CLK_MD);
	writel(MTR_CFG(3, 3, 1, 1, 0, 0,
		       calc_ltcy_value(cfg->rd_latency),
		       calc_ltcy_value(cfg->wr_latency)),
	       dsmc->regs + DSMC_MTR(cs));
	writel(cfg->rgn_num / 2,
	       dsmc->regs + DSMC_SLV_RGN_DIV(cs));
	for (i = 0; i < DSMC_LB_MAX_RGN; i++) {
		slv_rgn = &cfg->slv_rgn[i];
		if (!slv_rgn->status)
			continue;

		/* map the dummy-clock count onto enable + count bits */
		if (slv_rgn->dummy_clk_num >= 2)
			value = (0x1 << RGNX_ATTR_DUM_CLK_EN_SHIFT) |
				(0x1 << RGNX_ATTR_DUM_CLK_NUM_SHIFT);
		else if (slv_rgn->dummy_clk_num >= 1)
			value = (0x1 << RGNX_ATTR_DUM_CLK_EN_SHIFT) |
				(0x0 << RGNX_ATTR_DUM_CLK_NUM_SHIFT);
		else
			value = 0x0 << RGNX_ATTR_DUM_CLK_EN_SHIFT;
		/* RGN0..RGN3 attribute registers are consecutive words */
		writel((slv_rgn->attribute << RGNX_ATTR_SHIFT) |
		       (slv_rgn->cs0_ctrl << RGNX_ATTR_CTRL_SHIFT) |
		       (slv_rgn->cs0_be_ctrled <<
			RGNX_ATTR_BE_CTRLED_SHIFT) | value |
		       (slv_rgn->ca_addr_width <<
			RGNX_ATTR_ADDR_WIDTH_SHIFT),
		       dsmc->regs + DSMC_RGN0_ATTR(cs) + 4 * i);
	}
	/* clear and enable interrupt */
	writel(INT_STATUS(cs), dsmc->regs + DSMC_INT_STATUS);
	writel(INT_EN(cs), dsmc->regs + DSMC_INT_EN);

	return 0;
}
/*
 * Program the per-region common control register of slave region @rgn
 * through the CR window: write-data dummy cycles, read/write latency
 * and command/address cycle width. The caller must already have
 * switched chip select @cs into CR space.
 *
 * Returns 0 on success, -1 when a configured value is out of range
 * (dummy clocks > 1, or latency not 1/2).
 */
static int dsmc_slv_cmn_rgn_config(struct rockchip_dsmc *dsmc,
				   struct regions_config *slv_rgn,
				   uint32_t rgn, uint32_t cs)
{
	uint32_t tmp;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	tmp = lb_read_cmn(region_map, RGN_CMN_CON(rgn, 0));
	if (slv_rgn->dummy_clk_num == 0) {
		tmp &= ~(WR_DATA_CYC_EXTENDED_MASK << WR_DATA_CYC_EXTENDED_SHIFT);
	} else if (slv_rgn->dummy_clk_num == 1) {
		tmp |= slv_rgn->dummy_clk_num << WR_DATA_CYC_EXTENDED_SHIFT;
	} else {
		pr_err("DSMC: lb slave: dummy clk too large\n");
		return -1;
	}
	tmp &= ~(RD_LATENCY_CYC_MASK << RD_LATENCY_CYC_SHIFT);
	if ((cfg->rd_latency == 1) || (cfg->rd_latency == 2)) {
		tmp |= cfg->rd_latency << RD_LATENCY_CYC_SHIFT;
	} else {
		pr_err("DSMC: lb slave: read latency value error\n");
		return -1;
	}
	tmp &= ~(WR_LATENCY_CYC_MASK << WR_LATENCY_CYC_SHIFT);
	if ((cfg->wr_latency == 1) || (cfg->wr_latency == 2)) {
		tmp |= cfg->wr_latency << WR_LATENCY_CYC_SHIFT;
	} else {
		pr_err("DSMC: lb slave: write latency value error\n");
		return -1;
	}
	tmp &= ~(CA_CYC_MASK << CA_CYC_SHIFT);
	if (slv_rgn->ca_addr_width == RGNX_ATTR_32BIT_ADDR_WIDTH)
		tmp |= CA_CYC_32BIT << CA_CYC_SHIFT;
	else
		tmp |= CA_CYC_16BIT << CA_CYC_SHIFT;
	lb_write_cmn(region_map, RGN_CMN_CON(rgn, 0), tmp);

	return 0;
}
/*
 * Program the slave's global common registers through the CR window:
 * CMN_CON0 (dummy write cycles, read latency, CA width) and CMN_CON3
 * (ready-signal generation, data bus width). The caller must already
 * have switched chip select @cs into CR space.
 *
 * Returns 0 on success, -1 when a configured value is out of range.
 */
static int dsmc_slv_cmn_config(struct rockchip_dsmc *dsmc,
			       struct regions_config *slv_rgn, uint32_t cs)
{
	uint32_t tmp;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	tmp = lb_read_cmn(region_map, CMN_CON(0));
	if (slv_rgn->dummy_clk_num == 0) {
		tmp &= ~(WR_DATA_CYC_EXTENDED_MASK << WR_DATA_CYC_EXTENDED_SHIFT);
	} else if (slv_rgn->dummy_clk_num == 1) {
		tmp |= slv_rgn->dummy_clk_num << WR_DATA_CYC_EXTENDED_SHIFT;
	} else {
		pr_err("DSMC: lb slave: dummy clk too large\n");
		return -1;
	}
	tmp &= ~(RD_LATENCY_CYC_MASK << RD_LATENCY_CYC_SHIFT);
	if ((cfg->rd_latency == 1) || (cfg->rd_latency == 2)) {
		tmp |= cfg->rd_latency << RD_LATENCY_CYC_SHIFT;
	} else {
		pr_err("DSMC: lb slave: read latency value error\n");
		return -1;
	}
	tmp &= ~(CA_CYC_MASK << CA_CYC_SHIFT);
	if (slv_rgn->ca_addr_width == RGNX_ATTR_32BIT_ADDR_WIDTH)
		tmp |= CA_CYC_32BIT << CA_CYC_SHIFT;
	else
		tmp |= CA_CYC_16BIT << CA_CYC_SHIFT;
	lb_write_cmn(region_map, CMN_CON(0), tmp);

	tmp = lb_read_cmn(region_map, CMN_CON(3));
	tmp |= 0x1 << RDYN_GEN_CTRL_SHIFT;
	tmp &= ~(DATA_WIDTH_MASK << DATA_WIDTH_SHIFT);
	tmp |= cfg->io_width << DATA_WIDTH_SHIFT;
	lb_write_cmn(region_map, CMN_CON(3), tmp);

	return 0;
}
/*
 * Configure the local-bus slave's common registers for chip select
 * @cs: switch into x8 CR space, program the global CMN registers,
 * then each enabled region's CMN registers, and restore the saved
 * MCR value so memory-space accesses resume.
 *
 * Fix vs. original: an error returned by dsmc_slv_cmn_config() was
 * discarded because the region loop reassigned @ret; the error now
 * stops configuration and is returned (the MCR restore still runs on
 * every path).
 *
 * Returns 0 on success, -1 on invalid configuration.
 */
static int dsmc_lb_cmn_config(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t tmp, i;
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];
	struct regions_config *slv_rgn;
	int ret = 0;

	tmp = readl(dsmc->regs + DSMC_MCR(cs));
	/* config to CR space */
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) |
		       (MCR_CRT_MASK << MCR_CRT_SHIFT),
		       (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) |
		       (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT));
	slv_rgn = &cfg->slv_rgn[0];
	ret = dsmc_slv_cmn_config(dsmc, slv_rgn, cs);
	if (ret)
		goto restore;

	for (i = 0; i < DSMC_LB_MAX_RGN; i++) {
		slv_rgn = &cfg->slv_rgn[i];
		if (!slv_rgn->status)
			continue;

		ret = dsmc_slv_cmn_rgn_config(dsmc, slv_rgn, i, cs);
		if (ret)
			break;
	}
restore:
	/* config to memory space */
	writel(tmp, dsmc->regs + DSMC_MCR(cs));

	return ret;
}
/*
 * Enable and clear all slave-to-host (S2H) interrupts in the slave's
 * CSR block. Region 0 is temporarily given the register attribute so
 * the LBC_S2H_* registers are reachable, then the saved MCR and
 * RGN0_ATTR values are restored.
 *
 * NOTE(review): the "CSR space" switch programs MCR_CRT_MEM_SPACE,
 * i.e. the slave CSRs are reached through memory space combined with
 * RGNX_ATTR_REG — confirm this is the intended access path.
 */
static void dsmc_lb_csr_config(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t mcr_tmp, rgn_attr_tmp;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];

	mcr_tmp = readl(dsmc->regs + DSMC_MCR(cs));
	rgn_attr_tmp = readl(dsmc->regs + DSMC_RGN0_ATTR(cs));
	/* config to slave CSR space */
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       MCR_CRT_MASK << MCR_CRT_SHIFT,
		       MCR_CRT_MEM_SPACE << MCR_CRT_SHIFT);
	REG_CLRSETBITS(dsmc, DSMC_RGN0_ATTR(cs),
		       RGNX_ATTR_MASK << RGNX_ATTR_SHIFT,
		       RGNX_ATTR_REG << RGNX_ATTR_SHIFT);
	/* enable all s2h interrupt */
	writel(0xffffffff, region_map->virt + LBC_S2H_INT_STA_EN);
	writel(0xffffffff, region_map->virt + LBC_S2H_INT_STA_SIG_EN);
	/* clear all s2h interrupt */
	writel(LBC_S2H_INT_STA_MASK << LBC_S2H_INT_STA_SHIFT,
	       region_map->virt + LBC_S2H_INT_STA);
	/* config to normal memory space */
	writel(mcr_tmp, dsmc->regs + DSMC_MCR(cs));
	writel(rgn_attr_tmp, dsmc->regs + DSMC_RGN0_ATTR(cs));
}
/*
 * Encode @rd_ltcy/@wr_ltcy with calc_ltcy_value() and program them
 * into the RLTCY/WLTCY fields of DSMC_MTR for chip select @cs.
 */
static void dsmc_cfg_latency(uint32_t rd_ltcy, uint32_t wr_ltcy,
			     struct rockchip_dsmc *dsmc, uint32_t cs)
{
	rd_ltcy = calc_ltcy_value(rd_ltcy);
	wr_ltcy = calc_ltcy_value(wr_ltcy);

	REG_CLRSETBITS(dsmc, DSMC_MTR(cs),
		       (MTR_RLTCY_MASK << MTR_RLTCY_SHIFT) |
		       (MTR_WLTCY_MASK << MTR_WLTCY_SHIFT),
		       (rd_ltcy << MTR_RLTCY_SHIFT) |
		       (wr_ltcy << MTR_WLTCY_SHIFT));
}
/*
 * Program the mode/configuration registers of the detected PSRAM on
 * chip select @cs (read/write latency, burst length, I/O width) and
 * mirror the chosen latencies into the controller's MTR register.
 *
 * Device registers are accessed through the x8 CR window; the saved
 * MCR value is restored afterwards so memory-space accesses resume.
 *
 * Always returns 0.
 */
static int dsmc_psram_cfg(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t latency, mcr, tmp;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];
	struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs];

	mcr = readl(dsmc->regs + DSMC_MCR(cs));
	/* config to CR space */
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) |
		       (MCR_CRT_MASK << MCR_CRT_SHIFT),
		       (MCR_IOWIDTH_X8 << MCR_IOWIDTH_SHIFT) |
		       (MCR_CRT_CR_SPACE << MCR_CRT_SHIFT));
	if (cs_cfg->protcl == OPI_XCCELA_PSRAM) {
		/* Xccela psram init */
		uint8_t mr_tmp;

		/* MR0: variable read latency, field encoded as latency - 3 */
		mr_tmp = xccela_read_mr(region_map, 0);
		tmp = cs_cfg->rd_latency - 3;
		mr_tmp = (mr_tmp & (~(XCCELA_MR0_RL_MASK << XCCELA_MR0_RL_SHIFT))) |
			 (tmp << XCCELA_MR0_RL_SHIFT);
		mr_tmp |= XCCELA_MR0_RL_TYPE_VARIABLE << XCCELA_MR0_RL_TYPE_SHIFT;
		xccela_write_mr(region_map, 0, mr_tmp);

		/* MR4: write latency uses a non-linear encoding (3->0, 5->2, 7->1) */
		mr_tmp = xccela_read_mr(region_map, 4);
		latency = cs_cfg->wr_latency;
		if (latency == 3)
			tmp = 0;
		else if (latency == 5)
			tmp = 2;
		else if (latency == 7)
			tmp = 1;
		else
			tmp = latency;
		mr_tmp = (mr_tmp & (~(XCCELA_MR4_WL_MASK << XCCELA_MR4_WL_SHIFT))) |
			 (tmp << XCCELA_MR4_WL_SHIFT);
		xccela_write_mr(region_map, 4, mr_tmp);
		dsmc_cfg_latency(cs_cfg->rd_latency, cs_cfg->wr_latency, dsmc, cs);

		/* MR8: I/O width and wrap burst length */
		mr_tmp = xccela_read_mr(region_map, 8);
		if (cs_cfg->io_width == MCR_IOWIDTH_X16) {
			mr_tmp |= XCCELA_MR8_IO_TYPE_X16 << XCCELA_MR8_IO_TYPE_SHIFT;
		} else {
			mr_tmp &= (~(XCCELA_MR8_IO_TYPE_MASK << XCCELA_MR8_IO_TYPE_SHIFT));
			mr_tmp |= XCCELA_MR8_IO_TYPE_X8 << XCCELA_MR8_IO_TYPE_SHIFT;
		}
		mr_tmp &= (~(XCCELA_MR8_BL_MASK << XCCELA_MR8_BL_SHIFT));
		if (cs_cfg->wrap_size == MCR_WRAPSIZE_8_CLK)
			mr_tmp |= (XCCELA_MR8_BL_8_CLK << XCCELA_MR8_BL_SHIFT);
		else if (cs_cfg->wrap_size == MCR_WRAPSIZE_16_CLK)
			mr_tmp |= (XCCELA_MR8_BL_16_CLK << XCCELA_MR8_BL_SHIFT);
		else if (cs_cfg->wrap_size == MCR_WRAPSIZE_32_CLK)
			mr_tmp |= (XCCELA_MR8_BL_32_CLK << XCCELA_MR8_BL_SHIFT);
		xccela_write_mr(region_map, 8, mr_tmp);
	} else {
		/* Hyper psram init */
		uint16_t cr_tmp;

		/* CR0: initial latency (non-linear: 3->0xe, 4->0xf, else n-5) and burst length */
		cr_tmp = hyper_read_mr(region_map, HYPER_PSRAM_CR0);
		latency = cs_cfg->wr_latency;
		if (latency == 3)
			tmp = 0xe;
		else if (latency == 4)
			tmp = 0xf;
		else
			tmp = latency - 5;
		cr_tmp = (cr_tmp & (~(CR0_INITIAL_LATENCY_MASK << CR0_INITIAL_LATENCY_SHIFT))) |
			 (tmp << CR0_INITIAL_LATENCY_SHIFT);
		cr_tmp = (cr_tmp & (~(CR0_BURST_LENGTH_MASK << CR0_BURST_LENGTH_SHIFT)));
		if (cs_cfg->wrap_size == MCR_WRAPSIZE_8_CLK)
			cr_tmp |= (CR0_BURST_LENGTH_8_CLK << CR0_BURST_LENGTH_SHIFT);
		else if (cs_cfg->wrap_size == MCR_WRAPSIZE_16_CLK)
			cr_tmp |= (CR0_BURST_LENGTH_16_CLK << CR0_BURST_LENGTH_SHIFT);
		else if (cs_cfg->wrap_size == MCR_WRAPSIZE_32_CLK)
			cr_tmp |= (CR0_BURST_LENGTH_32_CLK << CR0_BURST_LENGTH_SHIFT);
		hyper_write_mr(region_map, HYPER_PSRAM_CR0, cr_tmp);
		dsmc_cfg_latency(latency, latency, dsmc, cs);

		/* CR1: select differential clock */
		cr_tmp = hyper_read_mr(region_map, HYPER_PSRAM_CR1);
		cr_tmp = (cr_tmp & (~(CR1_CLOCK_TYPE_MASK << CR1_CLOCK_TYPE_SHIFT))) |
			 (CR1_CLOCK_TYPE_DIFF_CLK << CR1_CLOCK_TYPE_SHIFT);
		hyper_write_mr(region_map, HYPER_PSRAM_CR1, cr_tmp);
	}
	/* config to memory space */
	writel(mcr, dsmc->regs + DSMC_MCR(cs));

	return 0;
}
/*
 * Choose read/write latency from the PSRAM clock frequency and run
 * the device-side configuration.
 *
 * Fix vs. original: the return value of dsmc_psram_cfg() was silently
 * discarded; it is now propagated to the caller.
 *
 * Returns 0 on success, -1 when the frequency is unsupported or the
 * configuration fails.
 */
static int dsmc_psram_init(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t latency;
	struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs];
	uint32_t mhz = dsmc->cfg.freq_hz / MHZ;

	/* latency requirement grows with the bus frequency */
	if (mhz <= 66) {
		latency = 3;
	} else if (mhz <= 100) {
		latency = 4;
	} else if (mhz <= 133) {
		latency = 5;
	} else if (mhz <= 166) {
		latency = 6;
	} else if (mhz <= 200) {
		latency = 7;
	} else {
		pr_err("DSMC: PSRAM frequency do not support!\n");
		return -1;
	}
	cs_cfg->rd_latency = latency;
	cs_cfg->wr_latency = latency;

	return dsmc_psram_cfg(dsmc, cs);
}
/*
 * Apply the previously detected PSRAM parameters (vendor ID, protocol,
 * MTR timing) to the controller, run the device-side init and finally
 * program the wrap burst size.
 *
 * Returns the result of dsmc_psram_init().
 */
static int dsmc_ctrller_cfg_for_psram(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	int ret = 0;
	struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs];

	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       MCR_DEVTYPE_MASK << MCR_DEVTYPE_SHIFT,
		       MCR_DEVTYPE_HYPERRAM << MCR_DEVTYPE_SHIFT);
	REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
		       (VDMC_MID_MASK << VDMC_MID_SHIFT) |
		       (VDMC_PROTOCOL_MASK << VDMC_PROTOCOL_SHIFT),
		       (cs_cfg->mid << VDMC_MID_SHIFT) |
		       (cs_cfg->protcl << VDMC_PROTOCOL_SHIFT));
	writel(cs_cfg->mtr_timing,
	       dsmc->regs + DSMC_MTR(cs));
	ret = dsmc_psram_init(dsmc, cs);
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_WRAPSIZE_MASK << MCR_WRAPSIZE_SHIFT),
		       (cs_cfg->wrap_size << MCR_WRAPSIZE_SHIFT));

	return ret;
}
/*
 * Re-tune controller timing after the PSRAM is fully configured:
 * cap the maximum burst length so the chip-select low time (tCSM/tCEM)
 * is not exceeded at the current frequency, program the final I/O
 * width, and enable column-boundary transfer splitting.
 */
static void dsmc_psram_remodify_timing(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	uint32_t max_length = 511, tcmd = 3;
	uint32_t tcsm, tmp;
	uint32_t mhz = dsmc->cfg.freq_hz / MHZ;
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];
	struct dsmc_config_cs *cs_cfg = &dsmc->cfg.cs_cfg[cs];

	if (cs_cfg->mid == APM_PSRAM) {
		/* for extended temp */
		if (region_map->size <= 0x400000)
			tcsm = DSMC_DEC_TCEM_2_5U;
		else if (region_map->size <= 0x1000000)
			tcsm = DSMC_DEC_TCEM_3U;
		else
			tcsm = DSMC_DEC_TCEM_0_5U;
	} else {
		tcsm = DSMC_DEV_TCSM_1U;
	}
	/* clocks usable inside tCSM (rounded up), minus command/latency overhead */
	tmp = (tcsm * mhz + 999) / 1000;
	tmp = tmp - tcmd - 2 * cs_cfg->wr_latency - 4;
	if (tmp > max_length)
		tmp = max_length;
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_MAXEN_MASK << MCR_MAXEN_SHIFT) |
		       (MCR_MAXLEN_MASK << MCR_MAXLEN_SHIFT) |
		       (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT),
		       (MCR_MAX_LENGTH_EN << MCR_MAXEN_SHIFT) |
		       (tmp << MCR_MAXLEN_SHIFT) |
		       (cs_cfg->io_width << MCR_IOWIDTH_SHIFT));
	/* effective column bits: x16 devices consume one more address bit per word */
	if (cs_cfg->io_width == MCR_IOWIDTH_X16)
		tmp = cs_cfg->col - 2;
	else
		tmp = cs_cfg->col - 1;
	REG_CLRSETBITS(dsmc, DSMC_BDRTCR(cs),
		       (BDRTCR_COL_BIT_NUM_MASK << BDRTCR_COL_BIT_NUM_SHIFT) |
		       (BDRTCR_WR_BDR_XFER_EN_MASK << BDRTCR_WR_BDR_XFER_EN_SHIFT) |
		       (BDRTCR_RD_BDR_XFER_EN_MASK << BDRTCR_RD_BDR_XFER_EN_SHIFT),
		       ((tmp - 6) << BDRTCR_COL_BIT_NUM_SHIFT) |
		       (BDRTCR_WR_BDR_XFER_EN << BDRTCR_WR_BDR_XFER_EN_SHIFT) |
		       (BDRTCR_RD_BDR_XFER_EN << BDRTCR_RD_BDR_XFER_EN_SHIFT));
}
/*
 * Clear the slave-to-host interrupt used for DMA handshaking on @cs.
 *
 * If a region with the register attribute (RGNX_ATTR_REG) is already
 * configured it is used directly; otherwise region 0 is temporarily
 * re-attributed ("manual" mode) and its MCR/RGN0_ATTR values are
 * restored afterwards.
 */
static void dsmc_lb_dma_clear_s2h_intrupt(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	int region, manual = 0;
	uint32_t mcr_tmp, rgn_attr_tmp;
	struct dsmc_map *map;
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	region = find_attr_region(cfg, RGNX_ATTR_REG);
	if (region < 0) {
		/* no REG-attribute region configured: borrow region 0 */
		manual = -1;
		region = 0;
	}
	if (manual) {
		mcr_tmp = readl(dsmc->regs + DSMC_MCR(cs));
		rgn_attr_tmp = readl(dsmc->regs + DSMC_RGN0_ATTR(cs));
		/* config to slave CSR space */
		REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
			       MCR_CRT_MASK << MCR_CRT_SHIFT,
			       MCR_CRT_MEM_SPACE << MCR_CRT_SHIFT);
		REG_CLRSETBITS(dsmc, DSMC_RGN0_ATTR(cs),
			       RGNX_ATTR_MASK << RGNX_ATTR_SHIFT,
			       RGNX_ATTR_REG << RGNX_ATTR_SHIFT);
	}
	map = &dsmc->cs_map[cs].region_map[region];
	/* clear all s2h interrupt */
	writel(0x1 << S2H_INT_FOR_DMA_NUM,
	       map->virt + LBC_S2H_INT_STA);
	if (manual) {
		/* config to normal memory space */
		writel(mcr_tmp, dsmc->regs + DSMC_MCR(cs));
		writel(rgn_attr_tmp, dsmc->regs + DSMC_RGN0_ATTR(cs));
	}
}
/*
 * Stop hardware-triggered DMA for the current transfer chip select:
 * clear the pending DSMC interrupt, disable the DMA request line and
 * clear the slave-to-host interrupt used for DMA handshaking.
 */
void rockchip_dsmc_lb_dma_hw_mode_dis(struct rockchip_dsmc *dsmc)
{
	uint32_t cs = dsmc->xfer.ops_cs;

	/* clear dsmc interrupt */
	writel(INT_STATUS(cs), dsmc->regs + DSMC_INT_STATUS);
	/* disable dma request */
	writel(DMA_REQ_DIS(cs), dsmc->regs + DSMC_DMA_EN);
	dsmc_lb_dma_clear_s2h_intrupt(dsmc, cs);
}
EXPORT_SYMBOL(rockchip_dsmc_lb_dma_hw_mode_dis);
/*
 * Kick a DMA transfer from the host side by writing a changed value
 * into the slave's LBC_CON(15); the slave reacts by raising the S2H
 * interrupt reserved for DMA (S2H_INT_FOR_DMA_NUM). Uses an existing
 * REG-attribute region when available, otherwise temporarily
 * re-attributes region 0 and restores it.
 *
 * Always returns 0.
 */
int rockchip_dsmc_lb_dma_trigger_by_host(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	int region, manual = 0;
	uint32_t mcr_tmp, rgn_attr_tmp, flag_tmp;
	struct dsmc_map *map;
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	region = find_attr_region(cfg, RGNX_ATTR_REG);
	if (region < 0) {
		/* no REG-attribute region configured: borrow region 0 */
		manual = -1;
		region = 0;
	}
	if (manual) {
		mcr_tmp = readl(dsmc->regs + DSMC_MCR(cs));
		rgn_attr_tmp = readl(dsmc->regs + DSMC_RGN0_ATTR(cs));
		/* config to slave CSR space */
		REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
			       (MCR_CRT_MASK << MCR_CRT_SHIFT),
			       (MCR_CRT_MEM_SPACE << MCR_CRT_SHIFT));
		REG_CLRSETBITS(dsmc, DSMC_RGN0_ATTR(cs),
			       RGNX_ATTR_MASK << RGNX_ATTR_SHIFT,
			       RGNX_ATTR_REG << RGNX_ATTR_SHIFT);
	}
	map = &dsmc->cs_map[cs].region_map[region];
	/*
	 * write (readl(LBC_CON(15)) + 1) to LBC_CON15 to slave which will
	 * write APP_CON(S2H_INT_FOR_DMA_NUM) trigger a slave to host interrupt
	 */
	flag_tmp = readl(map->virt + LBC_CON(15));
	writel(flag_tmp + 1, map->virt + LBC_CON(15));
	if (manual) {
		/* config to normal memory space */
		writel(mcr_tmp, dsmc->regs + DSMC_MCR(cs));
		writel(rgn_attr_tmp, dsmc->regs + DSMC_RGN0_ATTR(cs));
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_dsmc_lb_dma_trigger_by_host);
/*
 * Detect the device attached to chip select @cs: perform the base
 * controller setup, then probe the PSRAM support list.
 *
 * Returns 0 on success, -1 when no known device is found.
 */
int rockchip_dsmc_device_dectect(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	rockchip_dsmc_ctrller_init(dsmc, cs);

	return dsmc_psram_dectect(dsmc, cs);
}
EXPORT_SYMBOL(rockchip_dsmc_device_dectect);
/*
 * Issue the Xccela PSRAM reset sequence on chip select @cs: enable
 * reset-command mode, perform one mode-register write (any value)
 * to trigger the reset, wait for the device, then leave
 * reset-command mode.
 *
 * Fix vs. original: xccela_write_mr() already applies
 * XCCELA_PSRAM_MR() to the register number and XCCELA_PSRAM_MR_SET()
 * to the value, so passing pre-translated arguments applied both
 * macros twice. For (MR0, 0) the double translation happened to
 * produce the same zero values, but the call is now consistent with
 * the identical reset sequence in dsmc_psram_id_detect().
 */
static void xccela_psram_reset(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	struct dsmc_map *region_map = &dsmc->cs_map[cs].region_map[0];

	REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
		       (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT),
		       (0x1 << VDMC_RESET_CMD_MODE_SHIFT));
	/* write mr any value to trigger xccela psram reset */
	xccela_write_mr(region_map, 0, 0x0);
	udelay(200);
	REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
		       (VDMC_RESET_CMD_MODE_MASK << VDMC_RESET_CMD_MODE_SHIFT),
		       (0x0 << VDMC_RESET_CMD_MODE_SHIFT));
}
/*
 * Re-initialize a previously detected PSRAM on @cs (e.g. after the
 * device lost its register state): reset Xccela parts, reprogram the
 * controller and device registers, then re-apply the timing limits.
 *
 * Returns the result of dsmc_ctrller_cfg_for_psram().
 */
int rockchip_dsmc_psram_reinit(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	int ret = 0;

	if (dsmc->cfg.cs_cfg[cs].protcl == OPI_XCCELA_PSRAM)
		xccela_psram_reset(dsmc, cs);
	ret = dsmc_ctrller_cfg_for_psram(dsmc, cs);
	dsmc_psram_remodify_timing(dsmc, cs);

	return ret;
}
EXPORT_SYMBOL(rockchip_dsmc_psram_reinit);
/*
 * Base controller setup for chip select @cs: read/write merge, RDS
 * DLL sample delays, the main MCR fields (access control, device
 * type, I/O width, wrap/burst limits), wrap-to-incr conversion,
 * variable-latency mode and the total device size.
 *
 * Always returns 0.
 */
int rockchip_dsmc_ctrller_init(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	struct dsmc_config_cs *cfg = &dsmc->cfg.cs_cfg[cs];

	writel(MRGTCR_READ_WRITE_MERGE_EN,
	       dsmc->regs + DSMC_MRGTCR(cs));
	writel((0x1 << RDS_DLL0_CTL_RDS_0_CLK_SMP_SEL_SHIFT) |
	       (cfg->dll_num[0] << RDS_DLL0_CTL_RDS_0_CLK_DELAY_NUM_SHIFT),
	       dsmc->regs + DSMC_RDS_DLL0_CTL(cs));
	writel((0x1 << RDS_DLL1_CTL_RDS_1_CLK_SMP_SEL_SHIFT) |
	       (cfg->dll_num[1] << RDS_DLL1_CTL_RDS_1_CLK_DELAY_NUM_SHIFT),
	       dsmc->regs + DSMC_RDS_DLL1_CTL(cs));
	REG_CLRSETBITS(dsmc, DSMC_MCR(cs),
		       (MCR_ACS_MASK << MCR_ACS_SHIFT) |
		       (MCR_DEVTYPE_MASK << MCR_DEVTYPE_SHIFT) |
		       (MCR_IOWIDTH_MASK << MCR_IOWIDTH_SHIFT) |
		       (MCR_EXCLUSIVE_DQS_MASK << MCR_EXCLUSIVE_DQS_SHIFT) |
		       (MCR_WRAPSIZE_MASK << MCR_WRAPSIZE_SHIFT) |
		       (MCR_MAXEN_MASK << MCR_MAXEN_SHIFT) |
		       (MCR_MAXLEN_MASK << MCR_MAXLEN_SHIFT),
		       (cfg->acs << MCR_ACS_SHIFT) |
		       (MCR_DEVTYPE_HYPERRAM << MCR_DEVTYPE_SHIFT) |
		       (cfg->io_width << MCR_IOWIDTH_SHIFT) |
		       (cfg->exclusive_dqs << MCR_EXCLUSIVE_DQS_SHIFT) |
		       (cfg->wrap_size << MCR_WRAPSIZE_SHIFT) |
		       (cfg->max_length_en << MCR_MAXEN_SHIFT) |
		       (cfg->max_length << MCR_MAXLEN_SHIFT));
	writel(cfg->wrap2incr_en, dsmc->regs + DSMC_WRAP2INCR(cs));
	REG_CLRSETBITS(dsmc, DSMC_VDMC(cs),
		       (VDMC_LATENCY_FIXED_MASK << VDMC_LATENCY_FIXED_SHIFT) |
		       (VDMC_PROTOCOL_MASK << VDMC_PROTOCOL_SHIFT),
		       (VDMC_LATENCY_VARIABLE << VDMC_LATENCY_FIXED_SHIFT) |
		       (cfg->device_type << VDMC_PROTOCOL_SHIFT));
	/* DEV_SIZE takes the highest set bit of the capacity (log2 for pow2 sizes) */
	writel(cap_2_dev_size(dsmc->cfg.cap), dsmc->regs + DSMC_DEV_SIZE);

	return 0;
}
EXPORT_SYMBOL(rockchip_dsmc_ctrller_init);
/*
 * Bring up a local-bus slave on chip select @cs: configure the
 * controller side, then the slave common registers, and finally the
 * slave's CSR block (S2H interrupt enable/clear).
 *
 * Returns 0 on success, -1 on invalid configuration.
 */
int rockchip_dsmc_lb_init(struct rockchip_dsmc *dsmc, uint32_t cs)
{
	int ret;

	dsmc_ctrller_cfg_for_lb(dsmc, cs);
	ret = dsmc_lb_cmn_config(dsmc, cs);
	if (!ret)
		dsmc_lb_csr_config(dsmc, cs);

	return ret;
}
EXPORT_SYMBOL(rockchip_dsmc_lb_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhihuan He <huan.he@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP DSMC controller driver");

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,403 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2024 Rockchip Electronics Co., Ltd.
*/
#ifndef __ROCKCHIP_DSMC_HOST_H
#define __ROCKCHIP_DSMC_HOST_H
/*
 * FPGA bring-up switches.
 * NOTE(review): these look like compile-time board/test knobs
 * (WINBOND_X16 and DMAC_TEST ship enabled) — confirm they are
 * intentional for production builds.
 */
#define DSMC_FPGA_WINBOND_X8 0
#define DSMC_FPGA_WINBOND_X16 1
#define DSMC_FPGA_APM_X8 0
#define DSMC_FPGA_APM_X16 0
#define DSMC_FPGA_DMAC_TEST 1

/* controller limits: chip selects and local-bus regions per cs */
#define DSMC_MAX_SLAVE_NUM 4
#define DSMC_LB_MAX_RGN 4

/* memory attributes and sizes for the mapped windows */
#define DSMC_MEM_ATTRIBUTE_NO_CACHE 0
#define DSMC_MEM_ATTRIBUTE_CACHE 1
#define DSMC_MEM_ATTRIBUTE_WR_COM 2
#define DSMC_MAP_UNCACHE_SIZE (128 * 1024)
#define DSMC_MAP_BUFFERED_SIZE (128 * 1024)

/* DSMC register */
#define DSMC_VER 0x0000
#define DSMC_CSR 0x0008
#define DSMC_TAR 0x0010
#define DSMC_AXICTL 0x0014
#define DSMC_CLK_MD 0x0020
#define DSMC_DLL_DBG_CTRL 0x0028
#define DSMC_DEV_SIZE 0x0030
#define DSMC_INT_EN 0x0040
#define DSMC_INT_STATUS 0x0044
#define DSMC_INT_MASK 0x0048
#define DSMC_DMA_EN 0x0050
#define DSMC_DMA_REQ_NUM(n) (0x0054 + (0x4 * (n)))
#define DSMC_DMA_MUX 0x005c
/* per-chip-select registers: one 0x1000 block per cs, starting at 0x1000 */
#define DSMC_VDMC(n) (0x1000 * ((n) + 1))
#define DSMC_MCR(n) (0x1000 * ((n) + 1) + 0x10)
#define DSMC_MTR(n) (0x1000 * ((n) + 1) + 0x14)
#define DSMC_BDRTCR(n) (0x1000 * ((n) + 1) + 0x20)
#define DSMC_MRGTCR(n) (0x1000 * ((n) + 1) + 0x24)
#define DSMC_WRAP2INCR(n) (0x1000 * ((n) + 1) + 0x28)
#define DSMC_RDS_DLL0_CTL(n) (0x1000 * ((n) + 1) + 0x30)
#define DSMC_RDS_DLL1_CTL(n) (0x1000 * ((n) + 1) + 0x34)
#define DSMC_SLV_RGN_DIV(n) (0x1000 * ((n) + 1) + 0x40)
#define DSMC_RGN0_ATTR(n) (0x1000 * ((n) + 1) + 0x50)
#define DSMC_RGN1_ATTR(n) (0x1000 * ((n) + 1) + 0x54)
#define DSMC_RGN2_ATTR(n) (0x1000 * ((n) + 1) + 0x58)
#define DSMC_RGN3_ATTR(n) (0x1000 * ((n) + 1) + 0x5c)
/* AXICTL */
#define AXICTL_RD_NO_ERR_SHIFT 8
#define AXICTL_RD_NO_ERR_MASK 0x1
/* INT_EN */
#define INT_EN_SHIFT 0
#define INT_EN_MASK 0xf
#define INT_EN(cs) (0x1 << (cs))
/* INT_STATUS */
#define INT_STATUS_SHIFT 0
#define INT_STATUS_MASK 0xf
#define INT_STATUS(cs) (0x1 << (cs))
/* INT_MASK */
#define INT_MASK(cs) (0x1 << (cs))
#define INT_UNMASK(cs) (0x0 << (cs))
/* DMA_EN */
#define DMA_REQ_EN_SHIFT 0
#define DMA_REQ_EN_MASK 0x1
#define DMA_REQ_EN(cs) (0x1 << (cs))
#define DMA_REQ_DIS(cs) (0x0 << (cs))
/* VDMC */
#define VDMC_MID_SHIFT 0
#define VDMC_MID_MASK 0xF
#define VDMC_PROTOCOL_SHIFT 4
#define VDMC_PROTOCOL_MASK 0xF
#define VDMC_RESET_CMD_MODE_SHIFT 8
#define VDMC_RESET_CMD_MODE_MASK 0x1
#define VDMC_LATENCY_FIXED_SHIFT 9
#define VDMC_LATENCY_FIXED_MASK 0x1
#define VDMC_LATENCY_VARIABLE 0
#define VDMC_LATENCY_FIXED 1
#define DSMC_UNKNOWN_DEVICE 0x0
#define OPI_XCCELA_PSRAM 0x1
#define HYPERBUS_PSRAM 0x2
#define DSMC_LB_DEVICE 0x3
/* RDS_DLL0_CTL */
#define RDS_DLL0_CTL_RDS_0_CLK_DELAY_NUM_SHIFT 0
#define RDS_DLL0_CTL_RDS_0_CLK_SMP_SEL_SHIFT 31
/* RDS_DLL1_CTL */
#define RDS_DLL1_CTL_RDS_1_CLK_DELAY_NUM_SHIFT 0
#define RDS_DLL1_CTL_RDS_1_CLK_SMP_SEL_SHIFT 31
/* MCR */
#define MCR_WRAPSIZE_SHIFT 0
#define MCR_WRAPSIZE_MASK 0x3
#define MCR_WRAPSIZE_32_CLK 1
#define MCR_WRAPSIZE_8_CLK 2
#define MCR_WRAPSIZE_16_CLK 3
#define MCR_EXCLUSIVE_DQS_SHIFT 2
#define MCR_EXCLUSIVE_DQS_MASK 0x1
#define MCR_IOWIDTH_SHIFT 3
#define MCR_IOWIDTH_MASK 0x1
#define MCR_DEVTYPE_SHIFT 4
#define MCR_DEVTYPE_MASK 0x1
#define MCR_CRT_SHIFT 5
#define MCR_CRT_MASK 0x1
#define MCR_ACS_SHIFT 16
#define MCR_ACS_MASK 0x1
#define MCR_TCMO_SHIFT 17
#define MCR_TCMO_MASK 0x1
#define MCR_MAXLEN_SHIFT 18
#define MCR_MAXLEN_MASK 0x1FF
#define MCR_MAXEN_SHIFT 31
#define MCR_MAXEN_MASK 0x1
#define MCR_CRT_CR_SPACE 0x1
#define MCR_CRT_MEM_SPACE 0x0
#define MCR_IOWIDTH_X16 0x1
#define MCR_IOWIDTH_X8 0x0
#define MCR_DEVTYPE_HYPERRAM 0x1
#define MCR_MAX_LENGTH_EN 0x1
#define MCR_MAX_LENGTH 0x1ff
/* BDRTCR */
#define BDRTCR_COL_BIT_NUM_SHIFT 0
#define BDRTCR_COL_BIT_NUM_MASK 0x7
#define BDRTCR_WR_BDR_XFER_EN_SHIFT 4
#define BDRTCR_WR_BDR_XFER_EN_MASK 0x1
#define BDRTCR_WR_BDR_XFER_EN 1
#define BDRTCR_RD_BDR_XFER_EN_SHIFT 5
#define BDRTCR_RD_BDR_XFER_EN_MASK 0x1
#define BDRTCR_RD_BDR_XFER_EN 1
/* MRGTCR */
#define MRGTCR_READ_WRITE_MERGE_EN 0x3
/* MTR */
#define MTR_WLTCY_SHIFT 0
#define MTR_WLTCY_MASK 0xf
#define MTR_RLTCY_SHIFT 4
#define MTR_RLTCY_MASK 0xf
#define MTR_WCSH_SHIFT 8
#define MTR_RCSH_SHIFT 12
#define MTR_WCSS_SHIFT 16
#define MTR_RCSS_SHIFT 20
#define MTR_WCSHI_SHIFT 24
#define MTR_RCSHI_SHIFT 28
/* RGNX_ATTR */
#define RGNX_ATTR_SHIFT 0
#define RGNX_ATTR_MASK 0x3
#define RGNX_ATTR_REG 0x0
#define RGNX_ATTR_DPRA 0x1
#define RGNX_ATTR_NO_MERGE_FIFO 0x2
#define RGNX_ATTR_MERGE_FIFO 0x3
#define RGNX_ATTR_CTRL_SHIFT 4
#define RGNX_ATTR_BE_CTRLED_SHIFT 5
#define RGNX_ATTR_DUM_CLK_EN_SHIFT 6
#define RGNX_ATTR_DUM_CLK_NUM_SHIFT 7
#define RGNX_ATTR_32BIT_ADDR_WIDTH 0
#define RGNX_ATTR_16BIT_ADDR_WIDTH 1
#define RGNX_ATTR_ADDR_WIDTH_SHIFT 8
#define RGNX_STATUS_ENABLED (1)
#define RGNX_STATUS_DISABLED (0)
#define MTR_CFG(RCSHI, WCSHI, RCSS, WCSS, RCSH, WCSH, RLTCY, WLTCY) \
(((RCSHI) << MTR_RCSHI_SHIFT) | \
((WCSHI) << MTR_WCSHI_SHIFT) | \
((RCSS) << MTR_RCSS_SHIFT) | \
((WCSS) << MTR_WCSS_SHIFT) | \
((RCSH) << MTR_RCSH_SHIFT) | \
((WCSH) << MTR_WCSH_SHIFT) | \
((RLTCY) << MTR_RLTCY_SHIFT) | \
((WLTCY) << MTR_WLTCY_SHIFT))
#define APM_PSRAM_LATENCY_FIXED 0x1
#define APM_PSRAM_LATENCY_VARIABLE 0x0
#define DSMC_BURST_WRAPSIZE_32CLK 0x1
#define DSMC_BURST_WRAPSIZE_8CLK 0x2
#define DSMC_BURST_WRAPSIZE_16CLK 0x3
#define DSMC_DLL_EN 0x1
#define HYPER_PSRAM_IR0 (0x00)
#define HYPER_PSRAM_IR1 (0x02)
#define HYPER_PSRAM_CR0 (0x1000)
/* Hyper psram configuration register 1 address */
#define HYPER_PSRAM_CR1 (0x1002)
/* Xccela psram mode-register addressing and byte packing within a 16-bit access */
#define XCCELA_PSRAM_MR(n) (2 * (n))
#define XCCELA_PSRAM_MR_GET(n) (((n) >> 8) & 0xff)
#define XCCELA_PSRAM_MR_SET(n) (((n) & 0xff) << 8)
/* device id bit mask */
#define HYPERBUS_DEV_ID_MASK (0xf)
/* Hyper ID register 0/1 fields: row/column counts and device IO width */
#define IR0_ROW_COUNT_SHIFT (0x8)
#define IR0_ROW_COUNT_MASK (0x1f)
#define IR0_COL_COUNT_SHIFT (0x4)
#define IR0_COL_COUNT_MASK (0xf)
#define IR1_DEV_IO_WIDTH_SHIFT (0)
#define IR1_DEV_IO_WIDTH_MASK (0xf)
#define IR1_DEV_IO_WIDTH_X16 (0x9)
/* Hyper CR0 fields: initial latency, fixed/variable latency, burst length */
#define CR0_INITIAL_LATENCY_SHIFT 4
#define CR0_INITIAL_LATENCY_MASK 0xf
#define CR0_FIXED_LATENCY_ENABLE_SHIFT 3
#define CR0_FIXED_LATENCY_ENABLE_MASK 0x1
#define CR0_FIXED_LATENCY_ENABLE_VARIABLE_LATENCY 0x0
#define CR0_FIXED_LATENCY_ENABLE_FIXED_LATENCY 0x1
#define CR0_BURST_LENGTH_SHIFT 0
#define CR0_BURST_LENGTH_MASK 0x3
#define CR0_BURST_LENGTH_64_CLK 0x0
#define CR0_BURST_LENGTH_32_CLK 0x1
#define CR0_BURST_LENGTH_8_CLK 0x2
#define CR0_BURST_LENGTH_16_CLK 0x3
/* Hyper CR1: single-ended vs differential clock */
#define CR1_CLOCK_TYPE_SHIFT 6
#define CR1_CLOCK_TYPE_MASK 0x1
#define CR1_CLOCK_TYPE_SINGLE_CLK 0x1
#define CR1_CLOCK_TYPE_DIFF_CLK 0x0
/* Xccela mode register fields (MR0/MR2/MR4/MR8): latency, density, IO, burst */
#define XCCELA_DEV_ID_MASK (0x1f)
#define XCCELA_MR0_RL_SHIFT (2)
#define XCCELA_MR0_RL_MASK (0x7)
#define XCCELA_MR0_RL_TYPE_SHIFT (5)
#define XCCELA_MR0_RL_TYPE_MASK (0x1)
#define XCCELA_MR0_RL_TYPE_FIXED (0x1)
#define XCCELA_MR0_RL_TYPE_VARIABLE (0x0)
#define XCCELA_MR2_DEV_DENSITY_MASK (0x7)
#define XCCELA_MR4_WL_SHIFT (5)
#define XCCELA_MR4_WL_MASK (0x7)
#define XCCELA_MR8_IO_TYPE_SHIFT (6)
#define XCCELA_MR8_IO_TYPE_MASK (0x1)
#define XCCELA_MR8_IO_TYPE_X16 (0x1)
#define XCCELA_MR8_IO_TYPE_X8 (0x0)
#define XCCELA_MR8_BL_SHIFT (0)
#define XCCELA_MR8_BL_MASK (0x7)
#define XCCELA_MR8_BL_32_CLK (0x2)
#define XCCELA_MR8_BL_16_CLK (0x1)
#define XCCELA_MR8_BL_8_CLK (0x0)
/* psram densities in bytes */
#define PSRAM_SIZE_32MBYTE (0x02000000)
#define PSRAM_SIZE_16MBYTE (0x01000000)
#define PSRAM_SIZE_8MBYTE (0x00800000)
#define PSRAM_SIZE_4MBYTE (0x00400000)
/* TCSM/TCEM */
/* values look like ns (e.g. _4U == 4000 -> 4us) -- TODO confirm units */
#define DSMC_DEV_TCSM_4U (4000)
#define DSMC_DEV_TCSM_1U (1000)
#define DSMC_DEC_TCEM_2_5U (2500)
#define DSMC_DEC_TCEM_3U (3000)
#define DSMC_DEC_TCEM_0_5U (500)
/* RK3506 GRF: DSMC request / DMAC channel routing (high 16 bits are the write-enable mask) */
#define RK3506_GRF_SOC_CON(n) (0x4 * (n))
#define GRF_DSMC_REQ0_SEL(n) ((0x1 << (15 + 16)) | ((n) << 15))
#define GRF_DSMC_REQ1_SEL(n) ((0x1 << (14 + 16)) | ((n) << 14))
#define GRF_DMAC0_CH10_SEL(n) ((0x1 << (7 + 16)) | ((n) << 7))
#define GRF_DMAC0_CH8_SEL(n) ((0x1 << (6 + 16)) | ((n) << 6))
#define GRF_DMAC0_CH3_SEL(n) ((0x1 << (3 + 16)) | ((n) << 3))
#define GRF_DMAC0_CH2_SEL(n) ((0x1 << (2 + 16)) | ((n) << 2))
/* RK3576 GPIO3/GPIO4 iomux select registers in the TOP IOC block */
#define RK3576_TPO_IOC_OFFSET (0x4000)
#define RK3576_GPIO3A_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x64)
#define RK3576_GPIO3B_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x68)
#define RK3576_GPIO3B_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x6c)
#define RK3576_GPIO3C_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x70)
#define RK3576_GPIO3C_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x74)
#define RK3576_GPIO3D_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x78)
#define RK3576_GPIO3D_IOMUX_SEL_H (RK3576_TPO_IOC_OFFSET + 0x7c)
#define RK3576_GPIO4A_IOMUX_SEL_L (RK3576_TPO_IOC_OFFSET + 0x80)
/* 4-bit iomux value at shift s, with the matching write-enable bits set */
#define RK3576_IOMUX_SEL(v, s) (((v) << (s)) | (0xf << ((s) + 16)))
/*
 * Configuration of one slave address region inside a chip-select
 * (up to DSMC_LB_MAX_RGN regions per CS).
 */
struct regions_config {
	uint32_t attribute;		/* region attribute flags -- encoding not visible here */
	uint32_t ca_addr_width;		/* command/address cycle width -- TODO confirm */
	uint32_t dummy_clk_num;		/* number of dummy clocks -- TODO confirm */
	uint32_t cs0_be_ctrled;
	uint32_t cs0_ctrl;
	uint32_t offset_range[2];	/* region start/end offset within the CS window -- TODO confirm */
	uint32_t status;		/* non-zero when the region is configured/usable */
};
/* Per-chip-select device configuration (psram or local-bus slave). */
struct dsmc_config_cs {
	uint16_t mid;			/* manufacturer id, cf. psram id enum (CYPRESS/ISSI/...) */
	uint16_t protcl;		/* bus protocol (Hyper vs Xccela) -- TODO confirm encoding */
	uint32_t device_type;		/* DSMC_UNKNOWN_DEVICE when nothing was detected */
	uint32_t mtr_timing;		/* MTR timing value, cf. psram_info table */
	uint32_t acs;
	uint32_t exclusive_dqs;
	uint32_t io_width;		/* device IO width (x8/x16) -- TODO confirm encoding */
	uint32_t wrap_size;
	uint32_t rd_latency;		/* read latency -- presumably in clocks; verify */
	uint32_t wr_latency;		/* write latency -- presumably in clocks; verify */
	uint32_t col;			/* column address bits -- TODO confirm */
	uint32_t wrap2incr_en;
	uint32_t max_length_en;
	uint32_t max_length;
	uint32_t rgn_num;		/* number of valid entries in slv_rgn[] -- TODO confirm */
	uint32_t dll_num[2];
	struct regions_config slv_rgn[DSMC_LB_MAX_RGN];
};
/* Top-level controller configuration covering every chip-select. */
struct dsmc_ctrl_config {
	uint32_t clk_mode;
	uint32_t freq_hz;	/* DSMC clock rate in Hz -- TODO confirm requested vs actual */
	uint32_t ctrl_freq_hz;	/* controller clock rate in Hz -- TODO confirm */
	uint32_t cap;
	struct dsmc_config_cs cs_cfg[DSMC_MAX_SLAVE_NUM];
};
/* One memory window: kernel virtual address plus physical base and length. */
struct dsmc_map {
	void *virt;
	phys_addr_t phys;	/* physical base; remapped to userspace by dsmc_mmap() */
	size_t size;		/* window length in bytes */
};
/* All region windows belonging to one chip-select. */
struct dsmc_cs_map {
	struct dsmc_map region_map[DSMC_LB_MAX_RGN];
};
/* Bookkeeping for a DMA copy issued through dsmc_ops copy_from/copy_to. */
struct dsmc_transfer {
	uint32_t ops_cs;		/* chip-select the transfer targets */
	struct dma_chan *dma_chan;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t transfer_size;		/* bytes to move */
	u8 brst_size;			/* DMA burst size -- TODO confirm units */
	u8 brst_len;			/* DMA burst length -- TODO confirm units */
	atomic_t state;			/* presumably reported via copy_{from,to}_state; verify */
};
/* Driver-private state for one DSMC controller instance. */
struct rockchip_dsmc {
	/* Hardware resources */
	void __iomem *regs;		/* controller MMIO registers */
	struct regmap *grf;		/* GRF syscon (request/DMAC routing, cf. GRF_* macros) */
	struct clk *aclk_root;
	struct clk *aclk;
	struct clk *pclk;
	struct clk *clk_sys;
	struct device *dev;
	struct dma_chan *dma_req[DSMC_MAX_SLAVE_NUM];	/* one DMA request channel per CS */
	struct dsmc_transfer xfer;	/* in-flight DMA transfer bookkeeping */
	struct reset_control *areset;
	struct reset_control *preset;
	struct dsmc_cs_map cs_map[DSMC_MAX_SLAVE_NUM];	/* mapped memory windows per CS */
	struct dsmc_ctrl_config cfg;	/* software view of controller/device config */
};
/* Exported handle pairing the controller state with its access ops. */
struct rockchip_dsmc_device {
	struct dsmc_ops *ops;
	struct rockchip_dsmc dsmc;
};
/*
 * Access operations on a DSMC device. read/write move one 32-bit value at
 * @addr inside (@cs, @region); copy_from/copy_to move @size bytes between a
 * region offset and a physical (DMA-capable) buffer; copy_{from,to}_state
 * report the state of the last copy. Return convention appears to be
 * 0 / negative errno -- TODO confirm against the implementations.
 */
struct dsmc_ops {
	int (*read)(struct rockchip_dsmc_device *dsmc_dev,
		    uint32_t cs, uint32_t region,
		    unsigned long addr, uint32_t *data);
	int (*write)(struct rockchip_dsmc_device *dsmc_dev,
		     uint32_t cs, uint32_t region,
		     unsigned long addr, uint32_t val);
	int (*copy_from)(struct rockchip_dsmc_device *dsmc_dev,
			 uint32_t cs, uint32_t region, uint32_t from,
			 dma_addr_t dst_phys, size_t size);
	int (*copy_to)(struct rockchip_dsmc_device *dsmc_dev,
		       uint32_t cs, uint32_t region, dma_addr_t src_phys,
		       uint32_t to, size_t size);
	int (*copy_from_state)(struct rockchip_dsmc_device *dsmc_dev);
	int (*copy_to_state)(struct rockchip_dsmc_device *dsmc_dev);
};
/* Initialize the DSMC controller for chip-select @cs. */
int rockchip_dsmc_ctrller_init(struct rockchip_dsmc *dsmc, uint32_t cs);
/*
 * Detect the device behind @cs. NOTE(review): "dectect" is a typo for
 * "detect"; renaming would break existing callers, so it is left as-is.
 */
int rockchip_dsmc_device_dectect(struct rockchip_dsmc *dsmc, uint32_t cs);
/* Look up a registered DSMC device by its compatible string. */
struct rockchip_dsmc_device *rockchip_dsmc_find_device_by_compat(const char *compat);
/* Return the @index-th supported compatible string. */
const char *rockchip_dsmc_get_compat(int index);
/* Local-bus char-device support (implemented in dsmc-lb-device.c). */
int rockchip_dsmc_lb_class_create(const char *name);
int rockchip_dsmc_lb_class_destroy(void);
void rockchip_dsmc_lb_dma_hw_mode_dis(struct rockchip_dsmc *dsmc);
int rockchip_dsmc_lb_dma_trigger_by_host(struct rockchip_dsmc *dsmc, uint32_t cs);
int rockchip_dsmc_lb_init(struct rockchip_dsmc *dsmc, uint32_t cs);
int rockchip_dsmc_psram_reinit(struct rockchip_dsmc *dsmc, uint32_t cs);
int rockchip_dsmc_register_lb_device(struct device *dev, uint32_t cs);
int rockchip_dsmc_unregister_lb_device(struct device *dev, uint32_t cs);
#endif /* __BUS_ROCKCHIP_ROCKCHIP_DSMC_HOST_H */

View file

@ -0,0 +1,192 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2024 Rockchip Electronics Co., Ltd.
*/
#include <linux/cdev.h>
#include <linux/io.h>
#include <linux/sysfs.h>
#include <linux/version_compat_defs.h>
#include "dsmc-host.h"
/* Per-chip-select bookkeeping: one char device per local-bus memory region. */
struct dsmc_cs {
	struct cdev cdev[DSMC_LB_MAX_RGN];
};
/* Minor-number layout is cs * DSMC_LB_MAX_RGN + region (see get_cs_index()). */
static struct dsmc_cs cs_info[DSMC_MAX_SLAVE_NUM];
static struct class *dsmc_class;
static dev_t dsmc_devt;
/* Decode the chip-select index from a node's minor number. */
static inline int get_cs_index(struct inode *inode)
{
	unsigned int minor = iminor(inode);

	return minor / DSMC_LB_MAX_RGN;
}
/* Decode the memory-region index from a node's minor number. */
static inline int get_mem_region_index(struct inode *inode)
{
	unsigned int minor = iminor(inode);

	return minor % DSMC_LB_MAX_RGN;
}
/*
 * Open a /dev/dsmc/cs%d/region%d node: validate the cs/region encoded in the
 * minor number and stash the matching dsmc_map for dsmc_mmap().
 */
static int dsmc_open(struct inode *inode, struct file *pfile)
{
	struct rockchip_dsmc_device *dsmc_dev;
	struct rockchip_dsmc *dsmc;
	struct dsmc_config_cs *cfg;
	int cs, rgn;

	cs = get_cs_index(inode);
	rgn = get_mem_region_index(inode);

	dsmc_dev = rockchip_dsmc_find_device_by_compat(rockchip_dsmc_get_compat(0));
	if (!dsmc_dev)
		return -EINVAL;
	dsmc = &dsmc_dev->dsmc;

	if (cs >= DSMC_MAX_SLAVE_NUM)
		return -EINVAL;
	cfg = &dsmc->cfg.cs_cfg[cs];

	/* Reject nodes for an undetected device or an unconfigured region. */
	if (cfg->device_type == DSMC_UNKNOWN_DEVICE ||
	    !cfg->slv_rgn[rgn].status)
		return -EINVAL;

	pfile->private_data = &dsmc->cs_map[cs].region_map[rgn];

	return 0;
}
/* No per-open resources to release; private_data points into static maps. */
static int dsmc_release(struct inode *inode, struct file *pfile)
{
	return 0;
}
/*
 * Map a DSMC region's physical window into userspace.
 *
 * Fix: the requested VMA length is now checked against the region size;
 * previously userspace could map (and access) physical memory beyond the
 * end of the region.
 */
static int dsmc_mmap(struct file *pfile, struct vm_area_struct *vma)
{
	struct dsmc_map *region = pfile->private_data;
	unsigned long pfn;
	unsigned long vm_size;

	if (!region)
		return -EINVAL;

	vm_size = vma->vm_end - vma->vm_start;
	/* Refuse mappings larger than the backing DSMC window. */
	if (vm_size > region->size)
		return -EINVAL;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	pfn = __phys_to_pfn(region->phys);
	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/* File operations for the /dev/dsmc/cs%d/region%d character devices. */
static const struct file_operations dsmc_fops = {
	.owner = THIS_MODULE,
	.open = dsmc_open,
	.release = dsmc_release,
	.mmap = dsmc_mmap,
};
/*
 * Create the device class used for the DSMC local-bus char devices.
 *
 * Fix: on failure the global dsmc_class is left NULL instead of holding an
 * ERR_PTR. Previously the later `!dsmc_class` checks in
 * rockchip_dsmc_lb_class_destroy()/rockchip_dsmc_register_lb_device()
 * would treat the error pointer as a valid class and dereference it.
 *
 * Returns 0 on success or a negative errno.
 */
int rockchip_dsmc_lb_class_create(const char *name)
{
	struct class *cls;

	cls = class_create(THIS_MODULE, name);
	if (IS_ERR(cls))
		return PTR_ERR(cls);

	dsmc_class = cls;
	return 0;
}
EXPORT_SYMBOL(rockchip_dsmc_lb_class_create);
/* Tear down the DSMC local-bus device class; a no-op if none was created. */
int rockchip_dsmc_lb_class_destroy(void)
{
	if (dsmc_class) {
		class_destroy(dsmc_class);
		dsmc_class = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_dsmc_lb_class_destroy);
/*
 * Create the char devices (/dev/dsmc/cs%d/region%d) for one chip-select.
 *
 * Fixes:
 *  - alloc_chrdev_region() now reserves the minor range starting at
 *    cs * DSMC_LB_MAX_RGN, matching the MKDEV() numbering below and the
 *    iminor() decoding in get_cs_index()/get_mem_region_index(). Previously
 *    baseminor 0 was reserved, so for cs > 0 the cdevs were added on
 *    unreserved minors.
 *  - The real error code from alloc_chrdev_region() is propagated instead
 *    of a blanket -ENODEV.
 *
 * NOTE(review): dsmc_devt is a single global; registering a second cs
 * overwrites the devt allocated for the first, so that one can no longer be
 * unregistered correctly. Consider storing the devt per chip-select --
 * TODO confirm whether multiple cs are registered concurrently.
 *
 * Returns 0 on success or a negative errno.
 */
int rockchip_dsmc_register_lb_device(struct device *dev, uint32_t cs)
{
	struct device *device_ret;
	int ret, j;

	if (!dev || (cs >= DSMC_MAX_SLAVE_NUM) || (!dsmc_class))
		return -EINVAL;

	ret = alloc_chrdev_region(&dsmc_devt, cs * DSMC_LB_MAX_RGN,
				  DSMC_LB_MAX_RGN, "dsmc");
	if (ret < 0) {
		dev_err(dev, "Failed to alloc dsmc device region\n");
		return ret;
	}

	for (j = 0; j < DSMC_LB_MAX_RGN; j++) {
		device_ret = device_create(dsmc_class, NULL,
					   MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j),
					   NULL, "dsmc/cs%d/region%d", cs, j);
		if (IS_ERR(device_ret)) {
			dev_err(dev, "Failed to create device for cs%d region%d\n", cs, j);
			ret = PTR_ERR(device_ret);
			goto err_device_create;
		}

		cdev_init(&cs_info[cs].cdev[j], &dsmc_fops);
		ret = cdev_add(&cs_info[cs].cdev[j],
			       MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j), 1);
		if (ret) {
			dev_err(dev, "Failed to add cdev for cs%d region%d\n", cs, j);
			goto err_cdev_add;
		}
	}

	return 0;

err_cdev_add:
	/* The device for region j exists but its cdev was never added. */
	device_destroy(dsmc_class, MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j));
err_device_create:
	while (j-- > 0) {
		device_destroy(dsmc_class, MKDEV(MAJOR(dsmc_devt), cs * DSMC_LB_MAX_RGN + j));
		cdev_del(&cs_info[cs].cdev[j]);
	}
	unregister_chrdev_region(dsmc_devt, DSMC_LB_MAX_RGN);
	return ret;
}
EXPORT_SYMBOL(rockchip_dsmc_register_lb_device);
/*
 * Destroy the char devices registered for chip-select @cs.
 *
 * Fix: delete this chip-select's cdevs. The original called
 * cdev_del(&cs_info->cdev[j]), which is cs_info[0].cdev[j] -- for cs > 0
 * it deleted cs 0's cdevs and leaked the ones actually registered.
 *
 * Returns 0 on success or -EINVAL on bad arguments.
 */
int rockchip_dsmc_unregister_lb_device(struct device *dev, uint32_t cs)
{
	int j;

	if (!dev || (cs >= DSMC_MAX_SLAVE_NUM))
		return -EINVAL;

	for (j = 0; j < DSMC_LB_MAX_RGN; j++) {
		device_destroy(dsmc_class,
			       MKDEV(MAJOR(dsmc_devt),
				     cs * DSMC_LB_MAX_RGN + j));
		cdev_del(&cs_info[cs].cdev[j]);
	}
	unregister_chrdev_region(dsmc_devt, DSMC_LB_MAX_RGN);

	return 0;
}
EXPORT_SYMBOL(rockchip_dsmc_unregister_lb_device);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhihuan He <huan.he@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP DSMC local bus device");

View file

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
 */
#ifndef __ROCKCHIP_DSMC_LB_SLAVE_H
#define __ROCKCHIP_DSMC_LB_SLAVE_H
/* Slave-to-host interrupt number reserved for DMA hand-shaking */
#define S2H_INT_FOR_DMA_NUM (15)
/* LBC_SLAVE_CMN register */
#define CMN_CON(n) (0x4 * (n))
#define CMN_STATUS (0x80)
#define RGN_CMN_CON(rgn, com) (0x100 + 0x100 * (rgn) + 0x4 * (com))
#define DBG_STATUS(n) (0x900 + 0x4 * (n))
/* LBC_SLAVE_CSR register */
#define APP_CON(n) (0x4 * (n))
#define APP_H2S_INT_STA (0x80)
#define APP_H2S_INT_STA_EN (0x84)
#define APP_H2S_INT_STA_SIG_EN (0x88)
#define LBC_CON(n) (0x100 + 0x4 * (n))
#define LBC_S2H_INT_STA (0x180)
#define LBC_S2H_INT_STA_EN (0x184)
#define LBC_S2H_INT_STA_SIG_EN (0x188)
#define AXI_WR_ADDR_BASE (0x800)
#define AXI_RD_ADDR_BASE (0x804)
#define DBG_STA(n) (0x900 + 0x4 * (n))
/* LBC_SLAVE_CMN_CMN_CON0: command/address cycle count and latency fields */
#define CA_CYC_16BIT (0)
#define CA_CYC_32BIT (1)
#define CA_CYC_SHIFT (0)
#define CA_CYC_MASK (0x1)
#define WR_LATENCY_CYC_SHIFT (4)
#define WR_LATENCY_CYC_MASK (0x7)
#define RD_LATENCY_CYC_SHIFT (8)
#define RD_LATENCY_CYC_MASK (0x7)
#define WR_DATA_CYC_EXTENDED_SHIFT (11)
#define WR_DATA_CYC_EXTENDED_MASK (0x1)
/* LBC_SLAVE_CMN_CMN_CON3 */
#define DATA_WIDTH_SHIFT (0)
#define DATA_WIDTH_MASK (0x1)
#define RDYN_GEN_CTRL_SHIFT (4)
#define RDYN_GEN_CTRL_MASK (0x1)
/* APP_H2S_INT_STA */
#define APP_H2S_INT_STA_SHIFT (0)
#define APP_H2S_INT_STA_MASK (0xFFFF)
/* APP_H2S_INT_STA_EN */
#define APP_H2S_INT_STA_EN_SHIFT (0)
#define APP_H2S_INT_STA_EN_MASK (0xFFFF)
/* APP_H2S_INT_STA_SIG_EN */
#define APP_H2S_INT_STA_SIG_EN_SHIFT (0)
#define APP_H2S_INT_STA_SIG_EN_MASK (0xFFFF)
/* LBC_S2H_INT_STA */
#define LBC_S2H_INT_STA_SHIFT (0)
#define LBC_S2H_INT_STA_MASK (0xFFFF)
/* LBC_S2H_INT_STA_EN */
#define LBC_S2H_INT_STA_EN_SHIFT (0)
#define LBC_S2H_INT_STA_EN_MASK (0xFFFF)
/* LBC_S2H_INT_STA_SIG_EN */
#define LBC_S2H_INT_STA_SIG_EN_SHIFT (0)
#define LBC_S2H_INT_STA_SIG_EN_MASK (0xFFFF)
#endif /* __ROCKCHIP_DSMC_LB_SLAVE_H */