From 7f71d2776b12d75dababf4dc1a98ca0d8a3205f4 Mon Sep 17 00:00:00 2001
From: Jon Lin
Date: Thu, 30 Nov 2023 16:45:53 +0800
Subject: [PATCH] PCI: rockchip: dw: Support IRQ user

Introduce a dedicated IRQ USER concept to expand the number of
available interrupt events, so that independent services can each
wait on their own event instead of sharing a single notification.

For userspace:
  ioctl PCIE_EP_RAISE_IRQ_USER to raise a dedicated IRQ event
  ioctl PCIE_EP_POLL_IRQ_USER to check the IRQ event status

Change-Id: I2dd5792a6245d3918dc3d555fccd1601fb9fb22e
Signed-off-by: Jon Lin
---
 drivers/misc/rockchip/pcie-rkep.c             | 121 +++++++++++++++---
 .../pci/controller/dwc/pcie-dw-ep-rockchip.c  | 120 ++++++++++++-----
 include/uapi/linux/rk-pcie-ep.h               |  24 +++-
 3 files changed, 212 insertions(+), 53 deletions(-)

diff --git a/drivers/misc/rockchip/pcie-rkep.c b/drivers/misc/rockchip/pcie-rkep.c
index 06caac118e30..467dcc48534d 100644
--- a/drivers/misc/rockchip/pcie-rkep.c
+++ b/drivers/misc/rockchip/pcie-rkep.c
@@ -91,9 +91,10 @@ static DEFINE_MUTEX(rkep_mutex);
 #define RKEP_USER_MEM_SIZE		SZ_64M
 
 #define PCIE_CFG_ELBI_APP_OFFSET	0xe00
+#define PCIE_CFG_ELBI_USER_DATA_OFF	0x10
+
 #define PCIE_ELBI_REG_NUM		0x2
-#define RKEP_EP_VIRTUAL_ID_MAX		(8 * 4096)
 #define RKEP_EP_ELBI_TIEMOUT_US		100000
 
 #define PCIE_RK3568_RC_DBI_BASE		0xf6000000
@@ -118,9 +119,10 @@ struct pcie_rkep {
 	struct dma_trx_obj *dma_obj;
 	struct pcie_ep_obj_info *obj_info;
 	struct page *user_pages; /* Allocated physical memory for user space */
-	struct fasync_struct *async;
 	struct mutex dev_lock_mutex;
 	DECLARE_BITMAP(virtual_id_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+	DECLARE_BITMAP(virtual_id_irq_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+	wait_queue_head_t wq_head;
 };
 
 struct pcie_file {
@@ -153,11 +155,11 @@ static int rkep_ep_request_virtual_id(struct pcie_file *pcie_file)
 		mutex_unlock(&pcie_rkep->dev_lock_mutex);
 		return -EINVAL;
 	}
-	__set_bit(index, pcie_rkep->virtual_id_bitmap);
+	set_bit(index, pcie_rkep->virtual_id_bitmap);
 	mutex_unlock(&pcie_rkep->dev_lock_mutex);
 
 	mutex_lock(&pcie_file->file_lock_mutex);
-	__set_bit(index, pcie_file->child_vid_bitmap);
+	set_bit(index, pcie_file->child_vid_bitmap);
 	mutex_unlock(&pcie_file->file_lock_mutex);
 
 	dev_dbg(&pcie_rkep->pdev->dev, "request virtual id %d\n", index);
@@ -191,11 +193,13 @@ static int rkep_ep_release_virtual_id(struct pcie_file *pcie_file, int index)
 	return 0;
 }
 
-static int rkep_ep_raise_elbi_irq_user(struct pcie_rkep *pcie_rkep, u32 interrupt_num)
+static int rkep_ep_raise_elbi_irq(struct pcie_file *pcie_file, u32 interrupt_num)
 {
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
 	u32 index, off;
 	int i, gap_us = 100;
 	u32 val;
+	int ret;
 
 	if (interrupt_num >= (PCIE_ELBI_REG_NUM * 16)) {
 		dev_err(&pcie_rkep->pdev->dev, "elbi int num out of max count\n");
@@ -216,16 +220,57 @@ static int rkep_ep_raise_elbi_irq_user(struct pcie_rkep *pcie_rkep, u32 interrup
 	if (i >= gap_us)
 		dev_err(&pcie_rkep->pdev->dev, "elbi int is not clear, status=%x\n", val);
 
-	return pci_write_config_dword(pcie_rkep->pdev, PCIE_CFG_ELBI_APP_OFFSET + 4 * index,
+	mutex_lock(&pcie_file->file_lock_mutex);
+	ret = pci_write_config_dword(pcie_rkep->pdev, PCIE_CFG_ELBI_APP_OFFSET + 4 * index,
 				      (1 << (off + 16)) | (1 << off));
+	mutex_unlock(&pcie_file->file_lock_mutex);
+
+	return ret;
 }
 
-static int pcie_rkep_fasync(int fd, struct file *file, int mode)
+static int rkep_ep_raise_irq_user_obj(struct pcie_file *pcie_file, u32 index)
 {
-	struct pcie_file *pcie_file = file->private_data;
 	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	int ret;
 
-	return fasync_helper(fd, file, mode, &pcie_rkep->async);
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(&pcie_rkep->pdev->dev, "raise irq_user, virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	pcie_rkep->obj_info->irq_type_ep = OBJ_IRQ_USER;
+	pcie_rkep->obj_info->irq_user_data_ep = index;
+	ret = rkep_ep_raise_elbi_irq(pcie_file, 0);
+
+	return ret;
+}
+
+static int rkep_ep_poll_irq_user(struct pcie_file *pcie_file, struct pcie_ep_obj_poll_virtual_id_cfg *cfg)
+{
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	u32 index = cfg->virtual_id;
+
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(&pcie_rkep->pdev->dev, "poll irq_user, virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	cfg->poll_status = NSIGPOLL;
+	if (cfg->sync) {
+		wait_event_interruptible(pcie_rkep->wq_head,
+					 test_bit(index, pcie_rkep->virtual_id_irq_bitmap));
+	} else {
+		wait_event_interruptible_timeout(pcie_rkep->wq_head,
+						 test_bit(index, pcie_rkep->virtual_id_irq_bitmap),
+						 msecs_to_jiffies(cfg->timeout_ms));
+	}
+	if (test_and_clear_bit(index, pcie_rkep->virtual_id_irq_bitmap))
+		cfg->poll_status = POLL_IN;
+
+	dev_dbg(&pcie_rkep->pdev->dev, "poll virtual id %d, ret=%d\n", index, cfg->poll_status);
+
+	return 0;
 }
 
 static int pcie_rkep_open(struct inode *inode, struct file *file)
@@ -253,8 +298,6 @@ static int pcie_rkep_release(struct inode *inode, struct file *file)
 	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
 	int index;
 
-	pcie_rkep_fasync(-1, file, 0);//TODO
-
 	while (1) {
 		mutex_lock(&pcie_file->file_lock_mutex);
 		index = find_first_bit(pcie_file->child_vid_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
@@ -526,6 +569,7 @@ static long pcie_rkep_ioctl(struct file *file, unsigned int cmd, unsigned long a
 	struct pcie_ep_dma_cache_cfg cfg;
 	struct pcie_ep_dma_block_req dma;
 	void __user *uarg = (void __user *)args;
+	struct pcie_ep_obj_poll_virtual_id_cfg poll_cfg;
 	int mmap_res;
 	int ret;
 	int index;
@@ -566,7 +610,8 @@ static long pcie_rkep_ioctl(struct file *file, unsigned int cmd, unsigned long a
 	case PCIE_EP_DMA_XFER_BLOCK:
 		ret = copy_from_user(&dma, uarg, sizeof(dma));
 		if (ret) {
-			dev_err(&pcie_rkep->pdev->dev, "failed to get dma_data copy from userspace\n");
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to get dma_data copy from userspace\n");
 			return -EFAULT;
 		}
 		ret = rkep_ep_dma_xfer(pcie_rkep, &dma);
@@ -589,7 +634,8 @@ static long pcie_rkep_ioctl(struct file *file, unsigned int cmd, unsigned long a
 	case PCIE_EP_RELEASE_VIRTUAL_ID:
 		ret = copy_from_user(&index, uarg, sizeof(index));
 		if (ret) {
-			dev_err(&pcie_rkep->pdev->dev, "failed to get dma_data copy from userspace\n");
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to get release data copy from userspace\n");
 			return -EFAULT;
 		}
 		ret = rkep_ep_release_virtual_id(pcie_file, index);
@@ -600,13 +646,42 @@ static long pcie_rkep_ioctl(struct file *file, unsigned int cmd, unsigned long a
 			return -EFAULT;
 		}
 		break;
+	case PCIE_EP_RAISE_IRQ_USER:
+		ret = copy_from_user(&index, uarg, sizeof(index));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to get raise irq data copy from userspace\n");
+			return -EFAULT;
+		}
+
+		ret = rkep_ep_raise_irq_user_obj(pcie_file, index);
+		if (ret < 0)
+			return -EFAULT;
+		break;
+	case PCIE_EP_POLL_IRQ_USER:
+		ret = copy_from_user(&poll_cfg, uarg, sizeof(poll_cfg));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to get poll irq data copy from userspace\n");
+
+			return -EFAULT;
+		}
+
+		ret = rkep_ep_poll_irq_user(pcie_file, &poll_cfg);
+		if (ret < 0)
+			return -EFAULT;
+
+		if (copy_to_user(uarg, &poll_cfg, sizeof(poll_cfg)))
+			return -EFAULT;
+		break;
 	case PCIE_EP_RAISE_ELBI:
 		ret = copy_from_user(&index, uarg, sizeof(index));
 		if (ret) {
-			dev_err(&pcie_rkep->pdev->dev, "failed to get dma_data copy from userspace\n");
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to get raise elbi data copy from userspace\n");
 			return -EFAULT;
 		}
-		ret = rkep_ep_raise_elbi_irq_user(pcie_rkep, index);
+		ret = rkep_ep_raise_elbi_irq(pcie_file, index);
 		if (ret < 0) {
 			dev_err(&pcie_rkep->pdev->dev, "raise elbi %d failed, ret=%d\n",
 				index, ret);
@@ -642,7 +717,6 @@ static const struct file_operations pcie_rkep_fops = {
 	.read = pcie_rkep_read,
 	.unlocked_ioctl = pcie_rkep_ioctl,
 	.mmap = pcie_rkep_mmap,
-	.fasync = pcie_rkep_fasync,
 	.release = pcie_rkep_release,
 	.llseek = default_llseek,
 };
@@ -765,7 +839,7 @@ static void pcie_rkep_start_dma_rd(struct dma_trx_obj *obj, struct dma_table *cu
 		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
 				     cur->start.asdword);
 	}
-	// pcie_rkep_dma_debug(obj, cur);
+	/* pcie_rkep_dma_debug(obj, cur); */
 }
 
 static void pcie_rkep_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
@@ -808,7 +882,7 @@ static void pcie_rkep_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cu
 		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
 				     cur->start.asdword);
 	}
-	// pcie_rkep_dma_debug(obj, cur);
+	/* pcie_rkep_dma_debug(obj, cur); */
 }
 
 static void pcie_rkep_start_dma_dwc(struct dma_trx_obj *obj, struct dma_table *table)
@@ -916,8 +990,8 @@ static int pcie_rkep_obj_handler(struct pcie_rkep *pcie_rkep, struct pci_dev *pd
 	u32 irq_type;
 	u32 chn;
 	union int_clear clears;
+	u32 reg;
 
-	kill_fasync(&pcie_rkep->async, SIGIO, POLL_IN);
 	irq_type = pcie_rkep->obj_info->irq_type_rc;
 	if (irq_type == OBJ_IRQ_DMA) {
 		/* DMA helper */
@@ -960,6 +1034,12 @@ static int pcie_rkep_obj_handler(struct pcie_rkep *pcie_rkep, struct pci_dev *pd
 				}
 			}
 		}
+	} else if (irq_type == OBJ_IRQ_USER) {
+		reg = pcie_rkep->obj_info->irq_user_data_rc;
+		if (reg < RKEP_EP_VIRTUAL_ID_MAX) {
+			set_bit(reg, pcie_rkep->virtual_id_irq_bitmap);
+			wake_up_interruptible(&pcie_rkep->wq_head);
+		}
 	}
 
 	return 0;
@@ -1094,7 +1174,7 @@ static int pcie_rkep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!name)
 		return -ENOMEM;
 
-	__set_bit(0, pcie_rkep->virtual_id_bitmap);
+	set_bit(0, pcie_rkep->virtual_id_bitmap);
 
 	ret = pci_enable_device(pdev);
 	if (ret) {
@@ -1152,6 +1232,7 @@ static int pcie_rkep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	pci_set_drvdata(pdev, pcie_rkep);
 
+	init_waitqueue_head(&pcie_rkep->wq_head);
 	ret = pcie_rkep_request_irq(pcie_rkep, PCI_IRQ_MSI);
 	if (ret)
 		goto err_register_irq;
diff --git a/drivers/pci/controller/dwc/pcie-dw-ep-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-ep-rockchip.c
index 29a8f8ae11d6..1addd6c38875 100644
--- a/drivers/pci/controller/dwc/pcie-dw-ep-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-ep-rockchip.c
@@ -94,12 +94,14 @@
 #define PCIE_ELBI_LOCAL_BASE		0x200e00
 
 #define PCIE_ELBI_APP_ELBI_INT_GEN0	0x0
-#define PCIE_ELBI_APP_ELBI_INT_GEN0_SIGIO	BIT(0)
+#define PCIE_ELBI_APP_ELBI_INT_GEN0_IRQ_USER	BIT(0)
 
 #define PCIE_ELBI_APP_ELBI_INT_GEN1	0x4
 
 #define PCIE_ELBI_LOCAL_ENABLE_OFF	0x8
 
+#define PCIE_ELBI_USER_DATA_OFF		0x10
+
 #define PCIE_DIRECT_SPEED_CHANGE	BIT(17)
 
 #define PCIE_TYPE0_STATUS_COMMAND_REG	0x4
@@ -131,12 +133,14 @@ struct rockchip_pcie {
 	u32 ib_target_size[PCIE_BAR_MAX_NUM];
 	void *ib_target_base[PCIE_BAR_MAX_NUM];
 	struct dma_trx_obj *dma_obj;
-	struct fasync_struct *async;
 	phys_addr_t dbi_base_physical;
 	struct pcie_ep_obj_info *obj_info;
 	enum pcie_ep_mmap_resource cur_mmap_res;
 	struct workqueue_struct *hot_rst_wq;
 	struct work_struct hot_rst_work;
+	struct mutex file_mutex;
+	DECLARE_BITMAP(virtual_id_irq_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+	wait_queue_head_t wq_head;
 };
 
 struct rockchip_pcie_misc_dev {
@@ -642,6 +646,49 @@ static void rockchip_pcie_raise_msi_irq(struct rockchip_pcie *rockchip, u8 inter
 	rockchip_pcie_writel_apb(rockchip, BIT(interrupt_num), PCIE_CLIENT_MSI_GEN_CON);
 }
 
+static int rockchip_pcie_raise_irq_user(struct rockchip_pcie *rockchip, u32 index)
+{
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(rockchip->pci.dev, "raise irq_user, virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&rockchip->file_mutex);
+	rockchip->obj_info->irq_type_rc = OBJ_IRQ_USER;
+	rockchip->obj_info->irq_user_data_rc = index;
+	rockchip_pcie_raise_msi_irq(rockchip, PCIe_CLIENT_MSI_OBJ_IRQ);
+	mutex_unlock(&rockchip->file_mutex);
+
+	return 0;
+}
+
+static int rockchip_pcie_poll_irq_user(struct rockchip_pcie *rockchip, struct pcie_ep_obj_poll_virtual_id_cfg *cfg)
+{
+	u32 index = cfg->virtual_id;
+
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(rockchip->pci.dev, "poll irq_user, virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	cfg->poll_status = NSIGPOLL;
+	if (cfg->sync)
+		wait_event_interruptible(rockchip->wq_head,
+					 test_bit(index, rockchip->virtual_id_irq_bitmap));
+	else
+		wait_event_interruptible_timeout(rockchip->wq_head,
+						 test_bit(index, rockchip->virtual_id_irq_bitmap),
+						 msecs_to_jiffies(cfg->timeout_ms));
+	if (test_and_clear_bit(index, rockchip->virtual_id_irq_bitmap))
+		cfg->poll_status = POLL_IN;
+
+	dev_dbg(rockchip->pci.dev, "poll virtual id %d, ret=%d\n", index, cfg->poll_status);
+
+	return 0;
+}
+
 static irqreturn_t rockchip_pcie_sys_irq_handler(int irq, void *arg)
 {
 	struct rockchip_pcie *rockchip = arg;
@@ -651,14 +698,19 @@ static irqreturn_t rockchip_pcie_sys_irq_handler(int irq, void *arg)
 	union int_status wr_status, rd_status;
 	union int_clear clears;
 	u32 reg, mask;
-	bool sigio = false;
 
 	/* ELBI helper, only check the valid bits, and discard the rest interrupts */
 	elbi_reg = dw_pcie_readl_dbi(pci, PCIE_ELBI_LOCAL_BASE + PCIE_ELBI_APP_ELBI_INT_GEN0);
-	if (elbi_reg & PCIE_ELBI_APP_ELBI_INT_GEN0_SIGIO) {
-		sigio = true;
-		rockchip->obj_info->irq_type_ep = OBJ_IRQ_ELBI;
+	if (elbi_reg & PCIE_ELBI_APP_ELBI_INT_GEN0_IRQ_USER) {
 		rockchip_pcie_elbi_clear(rockchip);
+
+		if (rockchip->obj_info->irq_type_ep == OBJ_IRQ_USER) {
+			reg = rockchip->obj_info->irq_user_data_ep;
+			if (reg < RKEP_EP_VIRTUAL_ID_MAX) {
+				set_bit(reg, rockchip->virtual_id_irq_bitmap);
+				wake_up_interruptible(&rockchip->wq_head);
+			}
+		}
 		goto out;
 	}
@@ -711,15 +763,9 @@ static irqreturn_t rockchip_pcie_sys_irq_handler(int irq, void *arg)
 		rockchip->obj_info->irq_type_ep = OBJ_IRQ_DMA;
 		rockchip->obj_info->dma_status_ep.wr |= wr_status.asdword;
 		rockchip->obj_info->dma_status_ep.rd |= rd_status.asdword;
-		sigio = true;
 	}
 
 out:
-	if (sigio) {
-		dev_dbg(rockchip->pci.dev, "SIGIO\n");
-		kill_fasync(&rockchip->async, SIGIO, POLL_IN);
-	}
-
 	reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
 	if (reg & BIT(2))
 		queue_work(rockchip->hot_rst_wq, &rockchip->hot_rst_work);
@@ -944,13 +990,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = rockchip_pcie_link_up,
 };
 
-static int pcie_ep_fasync(int fd, struct file *file, int mode)
-{
-	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
-
-	return fasync_helper(fd, file, mode, &rockchip->async);
-}
-
 static int pcie_ep_open(struct inode *inode, struct file *file)
 {
 	struct miscdevice *miscdev = file->private_data;
@@ -962,18 +1001,14 @@ static int pcie_ep_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int pcie_ep_release(struct inode *inode, struct file *file)
-{
-	return pcie_ep_fasync(-1, file, 0);
-}
-
 static long pcie_ep_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
 	struct pcie_ep_dma_cache_cfg cfg;
 	void __user *uarg = (void __user *)arg;
-	int i, ret;
+	struct pcie_ep_obj_poll_virtual_id_cfg poll_cfg;
 	enum pcie_ep_mmap_resource mmap_res;
+	int ret, index;
 
 	switch (cmd) {
 	case PCIE_DMA_CACHE_INVALIDE:
@@ -998,8 +1033,7 @@ static long pcie_ep_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 		dw_pcie_writel_dbi(&rockchip->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
 				   0xffffffff);
 		break;
-	case PCIE_DMA_RAISE_MSI_OBJ_IRQ_USER:
-		rockchip->obj_info->irq_type_rc = OBJ_IRQ_USER;
+	case PCIE_EP_RAISE_MSI:
 		rockchip_pcie_raise_msi_irq(rockchip, PCIe_CLIENT_MSI_OBJ_IRQ);
 		break;
 	case PCIE_EP_SET_MMAP_RESOURCE:
@@ -1016,6 +1050,34 @@ static long pcie_ep_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 		rockchip->cur_mmap_res = mmap_res;
 		break;
+	case PCIE_EP_RAISE_IRQ_USER:
+		ret = copy_from_user(&index, uarg, sizeof(index));
+		if (ret) {
+			dev_err(rockchip->pci.dev,
+				"failed to get raise irq data copy from userspace\n");
+			return -EFAULT;
+		}
+
+		ret = rockchip_pcie_raise_irq_user(rockchip, index);
+		if (ret < 0)
+			return -EFAULT;
+		break;
+	case PCIE_EP_POLL_IRQ_USER:
+		ret = copy_from_user(&poll_cfg, uarg, sizeof(poll_cfg));
+		if (ret) {
+			dev_err(rockchip->pci.dev,
+				"failed to get poll irq data copy from userspace\n");
+
+			return -EFAULT;
+		}
+
+		ret = rockchip_pcie_poll_irq_user(rockchip, &poll_cfg);
+		if (ret < 0)
+			return -EFAULT;
+
+		if (copy_to_user(uarg, &poll_cfg, sizeof(poll_cfg)))
+			return -EFAULT;
+		break;
 	default:
 		break;
 	}
@@ -1076,9 +1138,7 @@ static int pcie_ep_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations pcie_ep_ops = {
 	.owner = THIS_MODULE,
 	.open = pcie_ep_open,
-	.release = pcie_ep_release,
 	.unlocked_ioctl = pcie_ep_ioctl,
-	.fasync = pcie_ep_fasync,
 	.mmap = pcie_ep_mmap,
 };
@@ -1241,12 +1301,14 @@ already_linkup:
 		rockchip->dma_obj->config_dma_func = rockchip_pcie_config_dma_dwc;
 		rockchip->dma_obj->get_dma_status = rockchip_pcie_get_dma_status;
 	}
+	mutex_init(&rockchip->file_mutex);
 
 	/* Enable client ELBI interrupt */
 	rockchip_pcie_writel_apb(rockchip, 0x80000000, PCIE_CLIENT_INTR_MASK);
 	/* Enable ELBI interrupt */
 	rockchip_pcie_local_elbi_enable(rockchip);
 
+	init_waitqueue_head(&rockchip->wq_head);
 	ret = rockchip_pcie_request_sys_irq(rockchip, pdev);
 	if (ret)
 		goto deinit_phy;
diff --git a/include/uapi/linux/rk-pcie-ep.h b/include/uapi/linux/rk-pcie-ep.h
index fe1e677c1ad4..d89748eed925 100644
--- a/include/uapi/linux/rk-pcie-ep.h
+++ b/include/uapi/linux/rk-pcie-ep.h
@@ -72,6 +72,9 @@ enum pcie_ep_mmap_resource {
 	PCIE_EP_MMAP_RESOURCE_MAX,
 };
 
+#define PCIE_EP_OBJ_INFO_MSI_DATA_NUM	0x8
+#define RKEP_EP_VIRTUAL_ID_MAX		(PCIE_EP_OBJ_INFO_MSI_DATA_NUM * 32)	/* 256 virtual_id */
+
 /*
  * rockchip ep device information which is store in BAR0
  */
@@ -82,25 +85,38 @@ struct pcie_ep_obj_info {
 		__u16 mode;
 		__u16 submode;
 	} devmode;
-	__u32 msi_data[0x8];
-	__u8 reserved[0x1D4];
+	__u32 msi_data[PCIE_EP_OBJ_INFO_MSI_DATA_NUM];
+	__u8 reserved[0x1D0];
 
 	__u32 irq_type_rc;				/* Generate in ep isr, valid only for rc, clear in rc */
 	struct pcie_ep_obj_irq_dma_status dma_status_rc; /* Generate in ep isr, valid only for rc, clear in rc */
 	__u32 irq_type_ep;				/* Generate in ep isr, valid only for ep, clear in ep */
 	struct pcie_ep_obj_irq_dma_status dma_status_ep; /* Generate in ep isr, valid only for ep, clear in ep */
-	__u32 obj_irq_user_data;			/* OBJ_IRQ_USER userspace data */
+	__u32 irq_user_data_rc;				/* Generated in ep, valid only for rc, no need to clear */
+	__u32 irq_user_data_ep;				/* Generated in rc, valid only for ep, no need to clear */
+};
+
+/*
+ * rockchip driver ep_obj poll ioctl input parameters
+ */
+struct pcie_ep_obj_poll_virtual_id_cfg {
+	__u32 timeout_ms;
+	__u32 sync;
+	__u32 virtual_id;
+	__u32 poll_status;
 };
 
 #define PCIE_BASE			'P'
 #define PCIE_DMA_CACHE_INVALIDE		_IOW(PCIE_BASE, 1, struct pcie_ep_dma_cache_cfg)
 #define PCIE_DMA_CACHE_FLUSH		_IOW(PCIE_BASE, 2, struct pcie_ep_dma_cache_cfg)
 #define PCIE_DMA_IRQ_MASK_ALL		_IOW(PCIE_BASE, 3, int)
-#define PCIE_DMA_RAISE_MSI_OBJ_IRQ_USER	_IOW(PCIE_BASE, 4, int)
+#define PCIE_EP_RAISE_MSI		_IOW(PCIE_BASE, 4, int)
 #define PCIE_EP_SET_MMAP_RESOURCE	_IOW(PCIE_BASE, 6, int)
 #define PCIE_EP_RAISE_ELBI		_IOW(PCIE_BASE, 7, int)
 #define PCIE_EP_REQUEST_VIRTUAL_ID	_IOR(PCIE_BASE, 16, int)
 #define PCIE_EP_RELEASE_VIRTUAL_ID	_IOW(PCIE_BASE, 17, int)
+#define PCIE_EP_RAISE_IRQ_USER		_IOW(PCIE_BASE, 18, int)
+#define PCIE_EP_POLL_IRQ_USER		_IOWR(PCIE_BASE, 19, struct pcie_ep_obj_poll_virtual_id_cfg)
 #define PCIE_EP_DMA_XFER_BLOCK		_IOW(PCIE_BASE, 32, struct pcie_ep_dma_block_req)
 
 #endif
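
Note for reviewers: below is a minimal host-side (pcie-rkep) userspace
sketch of the new ioctl pair. It is illustrative only and not part of
the patch; the /dev/pcie-rkep node name is an assumption (adjust it to
the misc device name actually registered on your system), and error
handling is trimmed for brevity.

/* Illustrative example only -- not part of this patch */
#include <stdio.h>
#include <fcntl.h>
#include <signal.h>		/* POLL_IN (sigpoll code reused as poll_status) */
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rk-pcie-ep.h>

int main(void)
{
	struct pcie_ep_obj_poll_virtual_id_cfg cfg = {0};
	int id, fd;

	fd = open("/dev/pcie-rkep", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return 1;

	/* Allocate a dedicated virtual id for this service */
	if (ioctl(fd, PCIE_EP_REQUEST_VIRTUAL_ID, &id) < 0)
		goto err;

	/* Raise an IRQ USER event on that id toward the remote side */
	if (ioctl(fd, PCIE_EP_RAISE_IRQ_USER, &id) < 0)
		goto err;

	/* Wait up to 1s for the remote side to signal our virtual id */
	cfg.virtual_id = id;
	cfg.sync = 0;		/* 0: timed wait, non-zero: wait indefinitely */
	cfg.timeout_ms = 1000;
	if (ioctl(fd, PCIE_EP_POLL_IRQ_USER, &cfg) < 0)
		goto err;
	printf("poll_status=%u%s\n", cfg.poll_status,
	       cfg.poll_status == POLL_IN ? " (event received)" : "");

	ioctl(fd, PCIE_EP_RELEASE_VIRTUAL_ID, &id);
	close(fd);
	return 0;
err:
	close(fd);
	return 1;
}

The EP side exposes the same PCIE_EP_RAISE_IRQ_USER / PCIE_EP_POLL_IRQ_USER
pair on its own device node via pcie-dw-ep-rockchip.c, so the mirror-image
flow applies there.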