From 88b8310d163c192ae2cfed4ca4b6260a027a889a Mon Sep 17 00:00:00 2001
From: GuEe-GUI <2991707448@qq.com>
Date: Wed, 27 Nov 2024 11:29:48 +0800
Subject: [PATCH] [DM/PCI] Add DesignWare PCIe RC/EP base drivers

Many PCI controllers are based on the DesignWare PCIe core.
This patch adds the RC/EP base drivers as modules.

Signed-off-by: GuEe-GUI <2991707448@qq.com>
---
 components/drivers/pci/host/Kconfig           |   2 +
 components/drivers/pci/host/dw/Kconfig        |  13 +
 components/drivers/pci/host/dw/SConscript     |  21 +
 components/drivers/pci/host/dw/pcie-dw.c      | 645 +++++++++++++
 components/drivers/pci/host/dw/pcie-dw.h      | 440 +++++++++
 components/drivers/pci/host/dw/pcie-dw_ep.c   | 863 ++++++++++++++++++
 components/drivers/pci/host/dw/pcie-dw_host.c | 644 +++++++++++++
 .../drivers/pci/host/dw/pcie-dw_platfrom.c    | 295 ++++++
 8 files changed, 2923 insertions(+)
 create mode 100644 components/drivers/pci/host/dw/Kconfig
 create mode 100644 components/drivers/pci/host/dw/SConscript
 create mode 100644 components/drivers/pci/host/dw/pcie-dw.c
 create mode 100644 components/drivers/pci/host/dw/pcie-dw.h
 create mode 100644 components/drivers/pci/host/dw/pcie-dw_ep.c
 create mode 100644 components/drivers/pci/host/dw/pcie-dw_host.c
 create mode 100644 components/drivers/pci/host/dw/pcie-dw_platfrom.c

diff --git a/components/drivers/pci/host/Kconfig b/components/drivers/pci/host/Kconfig
index 73b45782e7c..221c997ebcf 100644
--- a/components/drivers/pci/host/Kconfig
+++ b/components/drivers/pci/host/Kconfig
@@ -8,3 +8,5 @@ config RT_PCI_HOST_GENERIC
     depends on RT_PCI_ECAM
     select RT_PCI_HOST_COMMON
     default y
+
+rsource "dw/Kconfig"
diff --git a/components/drivers/pci/host/dw/Kconfig b/components/drivers/pci/host/dw/Kconfig
new file mode 100644
index 00000000000..e76011b3871
--- /dev/null
+++ b/components/drivers/pci/host/dw/Kconfig
@@ -0,0 +1,13 @@
+config RT_PCI_DW
+    bool "DesignWare-based PCIe"
+    depends on RT_MFD_SYSCON
+    depends on RT_USING_DMA
+    default n
+
+config RT_PCI_DW_HOST
+    bool
+    depends on RT_PCI_DW
+
+config RT_PCI_DW_EP
+    bool
+    depends on RT_PCI_DW
diff --git a/components/drivers/pci/host/dw/SConscript b/components/drivers/pci/host/dw/SConscript
new file mode 100644
index 00000000000..ca6c031e004
--- /dev/null
+++ b/components/drivers/pci/host/dw/SConscript
@@ -0,0 +1,21 @@
+from building import *
+
+group = []
+
+if not GetDepend(['RT_PCI_DW']):
+    Return('group')
+
+cwd = GetCurrentDir()
+CPPPATH = [cwd + '/../../../include']
+
+src = ['pcie-dw.c', 'pcie-dw_platfrom.c']
+
+if GetDepend(['RT_PCI_DW_HOST']):
+    src += ['pcie-dw_host.c']
+
+if GetDepend(['RT_PCI_DW_EP']):
+    src += ['pcie-dw_ep.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/components/drivers/pci/host/dw/pcie-dw.c b/components/drivers/pci/host/dw/pcie-dw.c
new file mode 100644
index 00000000000..2cca6f15eb4
--- /dev/null
+++ b/components/drivers/pci/host/dw/pcie-dw.c
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-23     GuEe-GUI     first version
+ */
+
+#define DBG_TAG "pcie.dw"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "pcie-dw.h"
+
+static rt_uint8_t __dw_pcie_find_next_cap(struct dw_pcie *pci,
+        rt_uint8_t cap_ptr, rt_uint8_t cap)
+{
+    rt_uint16_t reg;
+    rt_uint8_t cap_id, next_cap_ptr;
+
+    if (!cap_ptr)
+    {
+        return 0;
+    }
+
+    reg = dw_pcie_readw_dbi(pci, cap_ptr);
+    cap_id = (reg & 0x00ff);
+
+    if (cap_id > PCIY_MAX)
+    {
+        return 0;
+    }
+
+    if (cap_id == cap)
+    {
+        return cap_ptr;
+    }
+
+    next_cap_ptr = (reg & 0xff00) >> 8;
+    return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+rt_uint8_t dw_pcie_find_capability(struct dw_pcie *pci, rt_uint8_t cap)
+{
+    rt_uint16_t reg;
+    rt_uint8_t next_cap_ptr;
+
+    reg = dw_pcie_readw_dbi(pci, PCIR_CAP_PTR);
+    next_cap_ptr = (reg & 0x00ff);
+
+    return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+static rt_uint16_t dw_pcie_find_next_ext_capability(struct dw_pcie *pci,
+        rt_uint16_t start, rt_uint8_t cap)
+{
+    rt_uint32_t header;
+    int ttl, pos = PCI_REGMAX + 1;
+
+    /* minimum 8 bytes per capability */
+    ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;
+
+    if (start)
+    {
+        pos = start;
+    }
+
+    header = dw_pcie_readl_dbi(pci, pos);
+    /*
+     * If we have no capabilities, this is indicated by cap ID,
+     * cap version and next pointer all being 0.
+     */
+    if (header == 0)
+    {
+        return 0;
+    }
+
+    while (ttl-- > 0)
+    {
+        if (PCI_EXTCAP_ID(header) == cap && pos != start)
+        {
+            return pos;
+        }
+
+        pos = PCI_EXTCAP_NEXTPTR(header);
+
+        if (pos < PCI_REGMAX + 1)
+        {
+            break;
+        }
+
+        header = dw_pcie_readl_dbi(pci, pos);
+    }
+
+    return 0;
+}
+
+rt_uint16_t dw_pcie_find_ext_capability(struct dw_pcie *pci, rt_uint8_t cap)
+{
+    return dw_pcie_find_next_ext_capability(pci, 0, cap);
+}
+
+rt_err_t dw_pcie_read(void *addr, rt_size_t size, rt_uint32_t *out_val)
+{
+    /* Check aligned */
+    if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
+    {
+        *out_val = 0;
+        return -RT_EINVAL;
+    }
+
+    if (size == 4)
+    {
+        *out_val = HWREG32(addr);
+    }
+    else if (size == 2)
+    {
+        *out_val = HWREG16(addr);
+    }
+    else if (size == 1)
+    {
+        *out_val = HWREG8(addr);
+    }
+    else
+    {
+        *out_val = 0;
+        return -RT_EINVAL;
+    }
+
+    return RT_EOK;
+}
+
+rt_err_t dw_pcie_write(void *addr, rt_size_t size, rt_uint32_t val)
+{
+    /* Check aligned */
+    if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
+    {
+        return -RT_EINVAL;
+    }
+
+    if (size == 4)
+    {
+        HWREG32(addr) = val;
+    }
+    else if (size == 2)
+    {
+        HWREG16(addr) = val;
+    }
+    else if (size == 1)
+    {
+        HWREG8(addr) = val;
+    }
+    else
+    {
+        return -RT_EINVAL;
+    }
+
+    return RT_EOK;
+}
+
+rt_uint32_t dw_pcie_read_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size)
+{
+    rt_err_t err;
+    rt_uint32_t val = 0;
+
+    if (pci->ops->read_dbi)
+    {
+        return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
+    }
+
+    if ((err = dw_pcie_read(pci->dbi_base + reg, size, &val)))
+    {
+        LOG_E("Read DBI address error = %s", rt_strerror(err));
+    }
+
+    return val;
+}
+
+void dw_pcie_write_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
+{
+    rt_err_t err;
+
+    if (pci->ops->write_dbi)
+    {
+        pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
+        return;
+    }
+
+    if ((err = dw_pcie_write(pci->dbi_base + reg, size, val)))
+    {
+        LOG_E("Write DBI address error = %s", rt_strerror(err));
+    }
+}
+
+void dw_pcie_write_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
+{
+    rt_err_t err;
+
+    if (pci->ops && pci->ops->write_dbi2)
+    {
+        pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
+        return;
+    }
+
+    if ((err = dw_pcie_write(pci->dbi_base2 + reg, size, val)))
+    {
+        LOG_E("Write DBI2 address error = %s", rt_strerror(err));
+    }
+}
+
+rt_uint32_t dw_pcie_readl_atu(struct dw_pcie *pci, rt_uint32_t reg)
+{
+    rt_err_t err;
+    rt_uint32_t val = 0;
+
+    if (pci->ops->read_dbi)
+    {
+        return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
+    }
+
+    if ((err = dw_pcie_read(pci->atu_base + reg, 4, &val)))
+    {
+        LOG_E("Read ATU address error = %s", rt_strerror(err));
+    }
+
+    return val;
+}
+
+void dw_pcie_writel_atu(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
+{
+    rt_err_t err;
+
+    if (pci->ops->write_dbi)
+    {
+        pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
+        return;
+    }
+
+    if ((err = dw_pcie_write(pci->atu_base + reg, 4, val)))
+    {
+        LOG_E("Write ATU address error = %s", rt_strerror(err));
+    }
+}
+
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, rt_uint8_t func_no,
+        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    rt_uint64_t limit_addr = cpu_addr + size - 1;
+
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+            rt_lower_32_bits(cpu_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+            rt_upper_32_bits(cpu_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
+            rt_lower_32_bits(limit_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
+            rt_upper_32_bits(limit_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+            rt_lower_32_bits(pci_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+            rt_upper_32_bits(pci_addr));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+            type | PCIE_ATU_FUNC_NUM(func_no));
+    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+            PCIE_ATU_ENABLE);
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+     */
+    for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
+    {
+        if (dw_pcie_readl_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
+        {
+            return;
+        }
+
+        rt_thread_mdelay(LINK_WAIT_IATU);
+    }
+
+    LOG_E("Outbound iATU is not being enabled");
+}
+
+static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
+        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    if (pci->ops->cpu_addr_fixup)
+    {
+        cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+    }
+
+    if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
+    {
+        dw_pcie_prog_outbound_atu_unroll(pci, func_no,
+                index, type, cpu_addr, pci_addr, size);
+
+        return;
+    }
+
+    dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, rt_lower_32_bits(cpu_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, rt_upper_32_bits(cpu_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, rt_lower_32_bits(cpu_addr + size - 1));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(pci_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(pci_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+     */
+    for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
+    {
+        if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
+        {
+            return;
+        }
+
+        rt_thread_mdelay(LINK_WAIT_IATU);
+    }
+
+    LOG_E("Outbound iATU is not being enabled");
+}
+
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    __dw_pcie_prog_outbound_atu(pci, 0, index, type, cpu_addr, pci_addr, size);
+}
+
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
+        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    __dw_pcie_prog_outbound_atu(pci, func_no, index, type, cpu_addr, pci_addr, size);
+}
+
+static rt_err_t dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci,
+        rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
+        enum dw_pcie_aspace_type aspace_type)
+{
+    int type;
+
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+            rt_lower_32_bits(cpu_addr));
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+            rt_upper_32_bits(cpu_addr));
+
+    switch (aspace_type)
+    {
+    case DW_PCIE_ASPACE_MEM:
+        type = PCIE_ATU_TYPE_MEM;
+        break;
+
+    case DW_PCIE_ASPACE_IO:
+        type = PCIE_ATU_TYPE_IO;
+        break;
+
+    default:
+        return -RT_EINVAL;
+    }
+
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+            type | PCIE_ATU_FUNC_NUM(func_no));
+    dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+            PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_ENABLE |
+            PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+     */
+    for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
+    {
+        if (dw_pcie_readl_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
+        {
+            return RT_EOK;
+        }
+
+        rt_thread_mdelay(LINK_WAIT_IATU);
+    }
+
+    LOG_E("Inbound iATU is not being enabled");
+
+    return -RT_EBUSY;
+}
+
+rt_err_t dw_pcie_prog_inbound_atu(struct dw_pcie *pci,
+        rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
+        enum dw_pcie_aspace_type aspace_type)
+{
+    int type;
+
+    if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
+    {
+        return dw_pcie_prog_inbound_atu_unroll(pci, func_no,
+                index, bar, cpu_addr, aspace_type);
+    }
+
+    dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | index);
+    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(cpu_addr));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(cpu_addr));
+
+    switch (aspace_type)
+    {
+    case DW_PCIE_ASPACE_MEM:
+        type = PCIE_ATU_TYPE_MEM;
+        break;
+
+    case DW_PCIE_ASPACE_IO:
+        type = PCIE_ATU_TYPE_IO;
+        break;
+
+    default:
+        return -RT_EINVAL;
+    }
+
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
+    dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
+            PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+    /*
+     * Make sure ATU enable takes effect before any subsequent config
+     * and I/O accesses.
+     */
+    for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
+    {
+        if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
+        {
+            return RT_EOK;
+        }
+
+        rt_thread_mdelay(LINK_WAIT_IATU);
+    }
+
+    LOG_E("Inbound iATU is not being enabled");
+
+    return -RT_EBUSY;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type)
+{
+    rt_uint32_t region;
+
+    switch (type)
+    {
+    case DW_PCIE_REGION_INBOUND:
+        region = PCIE_ATU_REGION_INBOUND;
+        break;
+
+    case DW_PCIE_REGION_OUTBOUND:
+        region = PCIE_ATU_REGION_OUTBOUND;
+        break;
+
+    default:
+        return;
+    }
+
+    if (pci->iatu_unroll_enabled)
+    {
+        if (region == PCIE_ATU_REGION_INBOUND)
+        {
+            dw_pcie_writel_ib_unroll(pci, index,
+                    PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
+        }
+        else
+        {
+            dw_pcie_writel_ob_unroll(pci, index,
+                    PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
+        }
+    }
+    else
+    {
+        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
+    }
+}
+
+rt_err_t dw_pcie_wait_for_link(struct dw_pcie *pci)
+{
+    /* Check if the link is up or not */
+    for (int retries = 0; retries < LINK_WAIT_MAX_RETRIES; ++retries)
+    {
+        if (dw_pcie_link_up(pci))
+        {
+            LOG_I("%s: Link up", rt_dm_dev_get_name(pci->dev));
+
+            return RT_EOK;
+        }
+
+        rt_hw_us_delay((LINK_WAIT_USLEEP_MIN + LINK_WAIT_USLEEP_MAX) >> 1);
+    }
+
+    LOG_I("PHY link never came up");
+
+    return -RT_ETIMEOUT;
+}
+
+rt_bool_t dw_pcie_link_up(struct dw_pcie *pci)
+{
+    rt_uint32_t val;
+
+    if (pci->ops->link_up)
+    {
+        return pci->ops->link_up(pci);
+    }
+
+    val = HWREG32(pci->dbi_base + PCIE_PORT_DEBUG1);
+
+    return (val & PCIE_PORT_DEBUG1_LINK_UP) && (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING));
+}
+
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
+{
+    rt_uint32_t val;
+
+    val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+    val |= PORT_MLTI_UPCFG_SUPPORT;
+    dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, rt_uint32_t link_gen)
+{
+    rt_uint32_t cap, ctrl2, link_speed;
+    rt_uint8_t offset = dw_pcie_find_capability(pci, PCIY_EXPRESS);
+
+    cap = dw_pcie_readl_dbi(pci, offset + PCIER_LINK_CAP);
+    ctrl2 = dw_pcie_readl_dbi(pci, offset + PCIER_LINK_CTL2);
+    ctrl2 &= ~PCIEM_LNKCTL2_TLS;
+
+    switch (link_gen)
+    {
+    case 1: link_speed = PCIEM_LNKCTL2_TLS_2_5GT; break;
+    case 2: link_speed = PCIEM_LNKCTL2_TLS_5_0GT; break;
+    case 3: link_speed = PCIEM_LNKCTL2_TLS_8_0GT; break;
+    case 4: link_speed = PCIEM_LNKCTL2_TLS_16_0GT; break;
+    default:
+        /* Use hardware capability */
+        link_speed = RT_FIELD_GET(PCIEM_LINK_CAP_MAX_SPEED, cap);
+        ctrl2 &= ~PCIEM_LNKCTL2_HASD;
+        break;
+    }
+
+    dw_pcie_writel_dbi(pci, offset + PCIER_LINK_CTL2, ctrl2 | link_speed);
+
+    cap &= ~((rt_uint32_t)PCIEM_LINK_CAP_MAX_SPEED);
+    dw_pcie_writel_dbi(pci, offset + PCIER_LINK_CAP, cap | link_speed);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+    rt_uint32_t val;
+    struct rt_device *dev = pci->dev;
+
+    if (pci->version >= 0x480a || (!pci->version && dw_pcie_iatu_unroll_enabled(pci)))
+    {
+        pci->iatu_unroll_enabled |= DWC_IATU_UNROLL_EN;
+
+        if (!pci->atu_base)
+        {
+            pci->atu_base = rt_dm_dev_iomap_by_name(dev, "atu");
+        }
+
+        if (!pci->atu_base)
+        {
+            pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+        }
+    }
+
+    LOG_D("iATU unroll is %sabled", pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN ? "en" : "dis");
+
+    if (pci->link_gen > 0)
+    {
+        dw_pcie_link_set_max_speed(pci, pci->link_gen);
+    }
+
+    /* Configure Gen1 N_FTS */
+    if (pci->fts_number[0])
+    {
+        val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+        val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+        val |= PORT_AFR_N_FTS(pci->fts_number[0]);
+        val |= PORT_AFR_CC_N_FTS(pci->fts_number[0]);
+        dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+    }
+
+    /* Configure Gen2+ N_FTS */
+    if (pci->fts_number[1])
+    {
+        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+        val &= ~PORT_LOGIC_N_FTS_MASK;
+        val |= pci->fts_number[1];
+        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+    }
+
+    val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+    val &= ~PORT_LINK_FAST_LINK_MODE;
+    val |= PORT_LINK_DLL_LINK_EN;
+    dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+    if (rt_dm_dev_prop_read_bool(dev, "snps,enable-cdm-check"))
+    {
+        val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+        val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START;
+        dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+    }
+
+    rt_dm_dev_prop_read_u32(dev, "num-lanes", &pci->num_lanes);
+
+    if (!pci->num_lanes)
+    {
+        LOG_D("Using h/w default number of lanes");
+        return;
+    }
+
+    /* Set the number of lanes */
+    val &= ~PORT_LINK_FAST_LINK_MODE;
+    val &= ~PORT_LINK_MODE_MASK;
+    switch (pci->num_lanes)
+    {
+    case 1: val |= PORT_LINK_MODE_1_LANES; break;
+    case 2: val |= PORT_LINK_MODE_2_LANES; break;
+    case 4: val |= PORT_LINK_MODE_4_LANES; break;
+    case 8: val |= PORT_LINK_MODE_8_LANES; break;
+    default:
+        LOG_E("Invalid num-lanes = %d", pci->num_lanes);
+        return;
+    }
+    dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+    /* Set link width speed control register */
+    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+    val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+    switch (pci->num_lanes)
+    {
+    case 1: val |= PORT_LOGIC_LINK_WIDTH_1_LANES; break;
+    case 2: val |= PORT_LOGIC_LINK_WIDTH_2_LANES; break;
+    case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break;
+    case 8: val |= PORT_LOGIC_LINK_WIDTH_8_LANES; break;
+    }
+    val |= pci->user_speed;
+    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+}
diff --git a/components/drivers/pci/host/dw/pcie-dw.h b/components/drivers/pci/host/dw/pcie-dw.h
new file mode 100644
index 00000000000..c0760bf8840
--- /dev/null
+++ b/components/drivers/pci/host/dw/pcie-dw.h
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-23     GuEe-GUI     first version
+ */
+
+#ifndef __PCIE_DESIGNWARE_H__
+#define __PCIE_DESIGNWARE_H__
+
+#include <rtthread.h>
+#include <rtdevice.h>
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES                       10
+#define LINK_WAIT_USLEEP_MIN                        90000
+#define LINK_WAIT_USLEEP_MAX                        100000
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES                  5
+#define LINK_WAIT_IATU                              9
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_AFR                               0x70c
+#define PORT_AFR_N_FTS_MASK                         RT_GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n)                           RT_FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
+#define PORT_AFR_CC_N_FTS_MASK                      RT_GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n)                        RT_FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM                         RT_BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT             24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK              RT_GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT              27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK               RT_GENMASK(29, 27)
+
+#define PCIE_PORT_LINK_CONTROL                      0x710
+#define PORT_LINK_LPBK_ENABLE                       RT_BIT(2)
+#define PORT_LINK_DLL_LINK_EN                       RT_BIT(5)
+#define PORT_LINK_FAST_LINK_MODE                    RT_BIT(7)
+#define PORT_LINK_MODE_MASK                         RT_GENMASK(21, 16)
+#define PORT_LINK_MODE(n)                           RT_FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES                      PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES                      PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES                      PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES                      PORT_LINK_MODE(0xf)
+
+#define PCIE_PORT_DEBUG0                            0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK                 0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0                   0x11
+#define PCIE_PORT_DEBUG1                            0x72c
+#define PCIE_PORT_DEBUG1_LINK_UP                    RT_BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING           RT_BIT(29)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL               0x80c
+#define PORT_LOGIC_N_FTS_MASK                       RT_GENMASK(7, 0)
+#define PORT_LOGIC_SPEED_CHANGE                     RT_BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK                  RT_GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n)                    RT_FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES               PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES               PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES               PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES               PORT_LOGIC_LINK_WIDTH(0x8)
+
+#define PCIE_MSI_ADDR_LO                            0x820
+#define PCIE_MSI_ADDR_HI                            0x824
+#define PCIE_MSI_INTR0_ENABLE                       0x828
+#define PCIE_MSI_INTR0_MASK                         0x82c
+#define PCIE_MSI_INTR0_STATUS                       0x830
+
+#define PCIE_PORT_MULTI_LANE_CTRL                   0x8c0
+#define PORT_MLTI_UPCFG_SUPPORT                     RT_BIT(7)
+
+#define PCIE_ATU_VIEWPORT                           0x900
+#define PCIE_ATU_REGION_INBOUND                     RT_BIT(31)
+#define PCIE_ATU_REGION_OUTBOUND                    0
+#define PCIE_ATU_CR1                                0x904
+#define PCIE_ATU_TYPE_MEM                           0x0
+#define PCIE_ATU_TYPE_IO                            0x2
+#define PCIE_ATU_TYPE_CFG0                          0x4
+#define PCIE_ATU_TYPE_CFG1                          0x5
+#define PCIE_ATU_FUNC_NUM(pf)                       ((pf) << 20)
+#define PCIE_ATU_CR2                                0x908
+#define PCIE_ATU_ENABLE                             RT_BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE                    RT_BIT(30)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN                  RT_BIT(19)
+#define PCIE_ATU_LOWER_BASE                         0x90c
+#define PCIE_ATU_UPPER_BASE                         0x910
+#define PCIE_ATU_LIMIT                              0x914
+#define PCIE_ATU_LOWER_TARGET                       0x918
+#define PCIE_ATU_BUS(x)                             RT_FIELD_PREP(RT_GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x)                             RT_FIELD_PREP(RT_GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x)                            RT_FIELD_PREP(RT_GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET                       0x91c
+
+#define PCIE_MISC_CONTROL_1_OFF                     0x8bc
+#define PCIE_DBI_RO_WR_EN                           RT_BIT(0)
+
+#define PCIE_MSIX_DOORBELL                          0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT                 24
+
+#define PCIE_PL_CHK_REG_CONTROL_STATUS              0xb20
+#define PCIE_PL_CHK_REG_CHK_REG_START               RT_BIT(0)
+#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS          RT_BIT(1)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR    RT_BIT(16)
+#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR         RT_BIT(17)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE            RT_BIT(18)
+
+#define PCIE_PL_CHK_REG_ERR_ADDR                    0xb28
+
+/*
+ * iATU Unroll-specific register definitions
+ * From core version 4.80 the address translation is done through unroll
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1                   0x00
+#define PCIE_ATU_UNR_REGION_CTRL2                   0x04
+#define PCIE_ATU_UNR_LOWER_BASE                     0x08
+#define PCIE_ATU_UNR_UPPER_BASE                     0x0C
+#define PCIE_ATU_UNR_LOWER_LIMIT                    0x10
+#define PCIE_ATU_UNR_LOWER_TARGET                   0x14
+#define PCIE_ATU_UNR_UPPER_TARGET                   0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT                    0x20
+
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; the driver core automatically derives atu_base from dbi_base using
+ * this offset, when atu_base is not set.
+ */
+#define DEFAULT_DBI_ATU_OFFSET                      (0x3 << 20)
+
+/* Register address builder */
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)    ((region) << 9)
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region)     (((region) << 9) | RT_BIT(8))
+
+#define MAX_MSI_IRQS                                256
+#define MAX_MSI_IRQS_PER_CTRL                       32
+#define MAX_MSI_CTRLS                               (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE                     12
+#define MSI_DEF_NUM_VECTORS                         32
+
+/* Maximum number of inbound/outbound iATUs */
+#define MAX_IATU_IN                                 256
+#define MAX_IATU_OUT                                256
+
+#define DWC_IATU_UNROLL_EN                          RT_BIT(0)
+#define DWC_IATU_IOCFG_SHARED                       RT_BIT(1)
+
+struct dw_pcie_host_ops;
+struct dw_pcie_ep_ops;
+struct dw_pcie_ops;
+
+enum dw_pcie_region_type
+{
+    DW_PCIE_REGION_UNKNOWN,
+    DW_PCIE_REGION_INBOUND,
+    DW_PCIE_REGION_OUTBOUND,
+};
+
+enum dw_pcie_device_mode
+{
+    DW_PCIE_UNKNOWN_TYPE,
+    DW_PCIE_EP_TYPE,
+    DW_PCIE_LEG_EP_TYPE,
+    DW_PCIE_RC_TYPE,
+};
+
+enum dw_pcie_aspace_type
+{
+    DW_PCIE_ASPACE_UNKNOWN,
+    DW_PCIE_ASPACE_MEM,
+    DW_PCIE_ASPACE_IO,
+};
+
+struct dw_pcie_port
+{
+    void *cfg0_base;
+    rt_uint64_t cfg0_addr;
+    rt_uint64_t cfg0_size;
+
+    rt_ubase_t io_addr;
+    rt_ubase_t io_bus_addr;
+    rt_size_t io_size;
+
+    const struct dw_pcie_host_ops *ops;
+
+    int sys_irq;
+    int msi_irq;
+    struct rt_pic *irq_pic;
+    struct rt_pic *msi_pic;
+
+    void *msi_data;
+    rt_ubase_t msi_data_phy;
+
+    rt_uint32_t irq_count;
+    rt_uint32_t irq_mask[MAX_MSI_CTRLS];
+
+    struct rt_pci_host_bridge *bridge;
+    const struct rt_pci_ops *bridge_child_ops;
+
+    struct rt_spinlock lock;
+    RT_BITMAP_DECLARE(msi_map, MAX_MSI_IRQS);
+};
+
+struct dw_pcie_host_ops
+{
+    rt_err_t (*host_init)(struct dw_pcie_port *port);
+    rt_err_t (*msi_host_init)(struct dw_pcie_port *port);
+    void (*set_irq_count)(struct dw_pcie_port *port);
+};
+
+struct dw_pcie_ep_func
+{
+    rt_list_t list;
+
+    rt_uint8_t func_no;
+    rt_uint8_t msi_cap;     /* MSI capability offset */
+    rt_uint8_t msix_cap;    /* MSI-X capability offset */
+};
+
+struct dw_pcie_ep
+{
+    struct rt_pci_ep *epc;
+    struct rt_pci_ep_bar *epc_bar[PCI_STD_NUM_BARS];
+
+    rt_list_t func_nodes;
+
+    const struct dw_pcie_ep_ops *ops;
+
+    rt_uint64_t aspace;
+    rt_uint64_t aspace_size;
+    rt_size_t page_size;
+
+    rt_uint8_t bar_to_atu[PCI_STD_NUM_BARS];
+    rt_ubase_t *outbound_addr;
+
+    rt_bitmap_t *ib_window_map;
+    rt_bitmap_t *ob_window_map;
+    rt_uint32_t num_ib_windows;
+    rt_uint32_t num_ob_windows;
+
+    void *msi_mem;
+    rt_ubase_t msi_mem_phy;
+};
+
+struct dw_pcie_ep_ops
+{
+    rt_err_t (*ep_init)(struct dw_pcie_ep *ep);
+    rt_err_t (*raise_irq)(struct dw_pcie_ep *ep, rt_uint8_t func_no, enum rt_pci_ep_irq type, unsigned irq);
+    rt_off_t (*func_select)(struct dw_pcie_ep *ep, rt_uint8_t func_no);
+};
+
+struct dw_pcie
+{
+    struct rt_device *dev;
+
+    void *dbi_base;
+    void *dbi_base2;
+    void *atu_base;
+
+    rt_uint32_t version;
+    rt_uint32_t num_viewport;
+    rt_uint32_t num_lanes;
+    rt_uint32_t link_gen;
+    rt_uint32_t user_speed;
+    rt_uint8_t iatu_unroll_enabled; /* Internal Address Translation Unit */
+    rt_uint8_t fts_number[2];       /* Fast Training Sequences */
+
+    struct dw_pcie_port port;
+    struct dw_pcie_ep endpoint;
+    const struct dw_pcie_ops *ops;
+
+    void *priv;
+};
+
+struct dw_pcie_ops
+{
+    rt_uint64_t (*cpu_addr_fixup)(struct dw_pcie *pcie, rt_uint64_t cpu_addr);
+    rt_uint32_t (*read_dbi)(struct dw_pcie *pcie, void *base, rt_uint32_t reg, rt_size_t size);
+    void        (*write_dbi)(struct dw_pcie *pcie, void *base, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
+    void        (*write_dbi2)(struct dw_pcie *pcie, void *base, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
+    rt_bool_t   (*link_up)(struct dw_pcie *pcie);
+    rt_err_t    (*start_link)(struct dw_pcie *pcie);
+    void        (*stop_link)(struct dw_pcie *pcie);
+};
+
+#define to_dw_pcie_from_port(ptr)       rt_container_of((ptr), struct dw_pcie, port)
+#define to_dw_pcie_from_endpoint(ptr)   rt_container_of((ptr), struct dw_pcie, endpoint)
+
+/*
+ * Role-selection glue: when RT_PCI_DW_HOST / RT_PCI_DW_EP is configured, the
+ * prototypes below are real functions (HOST_RET/EP_RET expand to ';') that
+ * are implemented in pcie-dw_host.c / pcie-dw_ep.c.  Otherwise they become
+ * rt_inline stubs whose body returns the value given to HOST_RET()/EP_RET().
+ */
+#ifdef RT_PCI_DW_HOST
+#undef RT_PCI_DW_HOST
+#define RT_PCI_DW_HOST                  1
+#define HOST_API
+#define HOST_RET(...)                   ;
+#else
+#define HOST_API                        rt_inline
+#define HOST_RET(...)                   { return __VA_ARGS__; }
+#endif
+
+#ifdef RT_PCI_DW_EP
+#undef RT_PCI_DW_EP
+#define RT_PCI_DW_EP                    1
+#define EP_API
+#define EP_RET(...)                     ;
+#else
+#define EP_API                          rt_inline
+#define EP_RET(...)                     { return __VA_ARGS__; }
+#endif
+
+rt_uint8_t dw_pcie_find_capability(struct dw_pcie *pci, rt_uint8_t cap);
+rt_uint16_t dw_pcie_find_ext_capability(struct dw_pcie *pci, rt_uint8_t cap);
+
+rt_err_t dw_pcie_read(void *addr, rt_size_t size, rt_uint32_t *out_val);
+rt_err_t dw_pcie_write(void *addr, rt_size_t size, rt_uint32_t val);
+
+rt_uint32_t dw_pcie_read_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val);
+rt_uint32_t dw_pcie_readl_atu(struct dw_pcie *pci, rt_uint32_t reg);
+void dw_pcie_writel_atu(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val);
+rt_bool_t dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
+rt_err_t dw_pcie_wait_for_link(struct dw_pcie *pci);
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size);
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no, int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size);
+rt_err_t dw_pcie_prog_inbound_atu(struct dw_pcie *pci, rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr, enum dw_pcie_aspace_type aspace_type);
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type);
+void dw_pcie_setup(struct dw_pcie *pci);
+
+/* 32-bit write to the primary DBI (config) space */
+rt_inline void dw_pcie_writel_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
+{
+    dw_pcie_write_dbi(pci, reg, 0x4, val);
+}
+
+/* 32-bit read from the primary DBI space */
+rt_inline rt_uint32_t dw_pcie_readl_dbi(struct dw_pcie *pci, rt_uint32_t reg)
+{
+    return dw_pcie_read_dbi(pci, reg, 0x4);
+}
+
+/* 16-bit write to the primary DBI space */
+rt_inline void dw_pcie_writew_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_uint16_t val)
+{
+    dw_pcie_write_dbi(pci, reg, 0x2, val);
+}
+
+/* 16-bit read from the primary DBI space */
+rt_inline rt_uint16_t dw_pcie_readw_dbi(struct dw_pcie *pci, rt_uint32_t reg)
+{
+    return dw_pcie_read_dbi(pci, reg, 0x2);
+}
+
+/* 8-bit write to the primary DBI space */
+rt_inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_uint8_t val)
+{
+    dw_pcie_write_dbi(pci, reg, 0x1, val);
+}
+
+/* 8-bit read from the primary DBI space */
+rt_inline rt_uint8_t dw_pcie_readb_dbi(struct dw_pcie *pci, rt_uint32_t reg)
+{
+    return dw_pcie_read_dbi(pci, reg, 0x1);
+}
+
+/* 32-bit write to the shadow DBI space (dbi_base2, e.g. BAR masks) */
+rt_inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
+{
+    dw_pcie_write_dbi2(pci, reg, 0x4, val);
+}
+
+/*
+ * Allow (or re-forbid) software writes to read-only config registers via
+ * the DBI_RO_WR_EN bit of the MISC_CONTROL_1 register.
+ */
+rt_inline void dw_pcie_dbi_ro_writable_enable(struct dw_pcie *pci, rt_bool_t enable)
+{
+    rt_uint32_t val = dw_pcie_readl_dbi(pci, PCIE_MISC_CONTROL_1_OFF);
+
+    if (enable)
+    {
+        val |= PCIE_DBI_RO_WR_EN;
+    }
+    else
+    {
+        val &= ~PCIE_DBI_RO_WR_EN;
+    }
+
+    dw_pcie_writel_dbi(pci, PCIE_MISC_CONTROL_1_OFF, val);
+}
+
+/* All-ones from the viewport register means this core has no viewport,
+ * i.e. the iATU registers are "unrolled" into a dedicated block. */
+rt_inline rt_uint8_t dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+{
+    return dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) == 0xffffffff ? 1 : 0;
+}
+
+/* Read an outbound iATU register of window @index (unroll layout) */
+rt_inline rt_uint32_t dw_pcie_readl_ob_unroll(struct dw_pcie *pci,
+        rt_uint32_t index, rt_uint32_t reg)
+{
+    return dw_pcie_readl_atu(pci, PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
+}
+
+/* Write an outbound iATU register of window @index (unroll layout) */
+rt_inline void dw_pcie_writel_ob_unroll(struct dw_pcie *pci,
+        rt_uint32_t index, rt_uint32_t reg, rt_uint32_t val)
+{
+    dw_pcie_writel_atu(pci, PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg, val);
+}
+
+/* Read an inbound iATU register of window @index (unroll layout) */
+rt_inline rt_uint32_t dw_pcie_readl_ib_unroll(struct dw_pcie *pci,
+        rt_uint32_t index, rt_uint32_t reg)
+{
+    return dw_pcie_readl_atu(pci, PCIE_GET_ATU_INB_UNR_REG_OFFSET(index) + reg);
+}
+
+/* Write an inbound iATU register of window @index (unroll layout).
+ * Operand order kept consistent with the ob/ib read/write helpers above. */
+rt_inline void dw_pcie_writel_ib_unroll(struct dw_pcie *pci,
+        rt_uint32_t index, rt_uint32_t reg, rt_uint32_t val)
+{
+    dw_pcie_writel_atu(pci, PCIE_GET_ATU_INB_UNR_REG_OFFSET(index) + reg, val);
+}
+
+/* Host (RC) mode entry points: real implementations live in pcie-dw_host.c
+ * when RT_PCI_DW_HOST is enabled; otherwise inline stubs (see HOST_RET). */
+HOST_API rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port) HOST_RET(-RT_ENOSYS)
+HOST_API void dw_pcie_msi_init(struct dw_pcie_port *port) HOST_RET()
+HOST_API void dw_pcie_free_msi(struct dw_pcie_port *port) HOST_RET()
+
+HOST_API void dw_pcie_setup_rc(struct dw_pcie_port *port) HOST_RET()
+
+HOST_API rt_err_t dw_pcie_host_init(struct dw_pcie_port *port) HOST_RET(-RT_ENOSYS)
+HOST_API void dw_pcie_host_deinit(struct dw_pcie_port *port) HOST_RET()
+
+HOST_API void dw_pcie_host_free(struct dw_pcie_port *port) HOST_RET()
+
+HOST_API void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg) HOST_RET(RT_NULL)
+
+/* Endpoint mode entry points: real implementations live in pcie-dw_ep.c
+ * when RT_PCI_DW_EP is enabled; otherwise inline stubs (see EP_RET). */
+EP_API rt_err_t dw_pcie_ep_init(struct dw_pcie_ep *ep) EP_RET(-RT_ENOSYS)
+EP_API rt_err_t dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) EP_RET(-RT_ENOSYS)
+EP_API void dw_pcie_ep_exit(struct dw_pcie_ep *ep) EP_RET()
+
+EP_API rt_err_t dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no) EP_RET(-RT_ENOSYS)
+EP_API rt_err_t dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no, unsigned irq) EP_RET(-RT_ENOSYS)
+EP_API rt_err_t dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no, unsigned irq) EP_RET(-RT_ENOSYS)
+EP_API rt_err_t dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, rt_uint8_t func_no, unsigned irq) EP_RET(-RT_ENOSYS)
+
+EP_API void dw_pcie_ep_reset_bar(struct dw_pcie *pci, int bar_idx) EP_RET()
+
+EP_API rt_err_t dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        int bar_idx, rt_ubase_t cpu_addr, enum dw_pcie_aspace_type aspace_type) EP_RET(-RT_ENOSYS)
+EP_API rt_err_t dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        rt_ubase_t phys_addr, rt_uint64_t pci_addr, rt_size_t size) EP_RET(-RT_ENOSYS)
+
+/* Stub must return a value: EP_RET() with no argument would expand to
+ * "{ return ; }" in a pointer-returning inline function, which does not
+ * compile when RT_PCI_DW_EP is disabled. */
+EP_API struct dw_pcie_ep_func *dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, rt_uint8_t func_no) EP_RET(RT_NULL)
+
+#endif /* __PCIE_DESIGNWARE_H__ */
diff --git a/components/drivers/pci/host/dw/pcie-dw_ep.c b/components/drivers/pci/host/dw/pcie-dw_ep.c
new file mode 100644
index 00000000000..b52f6b5324a
--- /dev/null
+++ b/components/drivers/pci/host/dw/pcie-dw_ep.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-23     GuEe-GUI     first version
+ */
+
+#define DBG_TAG "pcie.dw-ep"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "pcie-dw.h"
+
+/* Look up the per-function bookkeeping entry for @func_no; RT_NULL if absent. */
+struct dw_pcie_ep_func *dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, rt_uint8_t func_no)
+{
+    struct dw_pcie_ep_func *func, *found = RT_NULL;
+
+    rt_list_for_each_entry(func, &ep->func_nodes, list)
+    {
+        if (func->func_no == func_no)
+        {
+            found = func;
+            break;
+        }
+    }
+
+    return found;
+}
+
+/*
+ * Return the DBI offset of @func_no's configuration space (0 for single
+ * function setups or when no func_select hook is provided).
+ *
+ * ops->func_select is declared to return rt_off_t and per-function config
+ * spaces are typically several KiB apart, so the result must not be
+ * narrowed to 8 bits as it previously was.
+ */
+static rt_off_t dw_pcie_ep_func_select(struct dw_pcie_ep *ep, rt_uint8_t func_no)
+{
+    rt_off_t func_offset = 0;
+
+    if (ep->ops->func_select)
+    {
+        func_offset = ep->ops->func_select(ep, func_no);
+    }
+
+    return func_offset;
+}
+
+/*
+ * Return one BAR of @func_no to its power-on state: clear the BAR register
+ * and its writable-bits mask (shadow dbi2), covering the upper half as well
+ * for 64-bit memory BARs.
+ */
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, rt_uint8_t func_no,
+        int bar_idx, int flags)
+{
+    rt_uint32_t reg;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie_ep *ep = &pci->endpoint;
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = func_offset + PCIR_BAR(bar_idx);
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+
+    /* Mask (dbi2) first, then the live BAR value */
+    dw_pcie_writel_dbi2(pci, reg, 0x0);
+    dw_pcie_writel_dbi(pci, reg, 0x0);
+
+    if (flags & PCIM_BAR_MEM_TYPE_64)
+    {
+        dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
+        dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+    }
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+}
+
+/* Reset BAR @bar_idx of every physical function to its power-on state. */
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, int bar_idx)
+{
+    rt_uint8_t func_count = pci->endpoint.epc->max_functions;
+
+    for (rt_uint8_t func_no = 0; func_no < func_count; ++func_no)
+    {
+        __dw_pcie_ep_reset_bar(pci, func_no, bar_idx, 0);
+    }
+}
+
+/*
+ * Recursively walk @func_no's legacy capability list starting at @cap_ptr,
+ * returning the offset of capability @cap or 0 when not found (or when a
+ * bogus capability ID terminates the walk).
+ */
+static rt_uint8_t __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        rt_uint8_t cap_ptr, rt_uint8_t cap)
+{
+    rt_uint16_t reg;
+    rt_uint8_t cap_id, next_cap_ptr;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    if (!cap_ptr)
+    {
+        return 0;
+    }
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    /* One 16-bit read yields the cap ID (low byte) and next pointer (high byte) */
+    reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+    cap_id = (reg & 0x00ff);
+
+    if (cap_id > PCIY_MAX)
+    {
+        return 0;
+    }
+
+    if (cap_id == cap)
+    {
+        return cap_ptr;
+    }
+
+    next_cap_ptr = (reg & 0xff00) >> 8;
+    return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+/*
+ * Find legacy capability @cap in @func_no's own config space (via DBI).
+ * Returns its offset, or 0 when the capability is absent.
+ */
+static rt_uint8_t dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        rt_uint8_t cap)
+{
+    rt_uint16_t reg;
+    rt_uint8_t next_cap_ptr;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = dw_pcie_readw_dbi(pci, func_offset + PCIR_CAP_PTR);
+    next_cap_ptr = reg & 0x00ff;
+
+    return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+/*
+ * Claim a free inbound iATU window and program it so that host accesses to
+ * BAR @bar_idx are translated to @cpu_addr.  The window index is recorded in
+ * bar_to_atu[] so dw_pcie_ep_clear_bar() can release it later.
+ */
+rt_err_t dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        int bar_idx, rt_ubase_t cpu_addr, enum dw_pcie_aspace_type aspace_type)
+{
+    rt_err_t err;
+    rt_uint32_t free_win;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    free_win = rt_bitmap_next_clear_bit(ep->ib_window_map, 0, ep->num_ib_windows);
+    if (free_win >= ep->num_ib_windows)
+    {
+        LOG_E("No free inbound window");
+        return -RT_EEMPTY;
+    }
+
+    err = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar_idx, cpu_addr, aspace_type);
+    if (err)
+    {
+        LOG_E("Failed to program IB window error = %s", rt_strerror(err));
+        return err;
+    }
+
+    /* Only mark the window busy once programming succeeded */
+    ep->bar_to_atu[bar_idx] = free_win;
+    rt_bitmap_set_bit(ep->ib_window_map, free_win);
+
+    return RT_EOK;
+}
+
+/*
+ * Claim a free outbound iATU window and program a MEM translation from CPU
+ * address @phys_addr to bus address @pci_addr of length @size.  The CPU
+ * address is remembered so dw_pcie_find_index() can locate the window again
+ * at unmap time.
+ */
+rt_err_t dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        rt_ubase_t phys_addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    rt_uint32_t free_win;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    free_win = rt_bitmap_next_clear_bit(ep->ob_window_map, 0, ep->num_ob_windows);
+    if (free_win >= ep->num_ob_windows)
+    {
+        LOG_E("No free outbound window");
+        return -RT_EEMPTY;
+    }
+
+    dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+            phys_addr, pci_addr, size);
+
+    ep->outbound_addr[free_win] = phys_addr;
+    rt_bitmap_set_bit(ep->ob_window_map, free_win);
+
+    return RT_EOK;
+}
+
+/*
+ * Program @func_no's config-header identification fields (vendor/device IDs,
+ * class codes, INTx pin, ...).  These registers are read-only from the bus,
+ * hence the DBI read-only-write-enable bracket.
+ */
+static rt_err_t dw_pcie_ep_write_header(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        struct rt_pci_ep_header *hdr)
+{
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+
+    dw_pcie_writew_dbi(pci, func_offset + PCIR_VENDOR, hdr->vendor);
+    dw_pcie_writew_dbi(pci, func_offset + PCIR_DEVICE, hdr->device);
+    dw_pcie_writeb_dbi(pci, func_offset + PCIR_REVID, hdr->revision);
+    dw_pcie_writeb_dbi(pci, func_offset + PCIR_PROGIF, hdr->progif);
+    /* Sub-class in the low byte, base class in the high byte */
+    dw_pcie_writew_dbi(pci, func_offset + PCIR_SUBCLASS, hdr->subclass | hdr->class_code << 8);
+    dw_pcie_writeb_dbi(pci, func_offset + PCIR_CACHELNSZ, hdr->cache_line_size);
+    dw_pcie_writew_dbi(pci, func_offset + PCIR_SUBVEND_0, hdr->subsystem_vendor);
+    dw_pcie_writew_dbi(pci, func_offset + PCIR_SUBDEV_0, hdr->subsystem_device);
+    dw_pcie_writeb_dbi(pci, func_offset + PCIR_INTPIN, hdr->intx);
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+
+    return RT_EOK;
+}
+
+/*
+ * Undo dw_pcie_ep_set_bar(): reset the BAR registers and release the
+ * inbound iATU window that was backing it.
+ */
+static rt_err_t dw_pcie_ep_clear_bar(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        struct rt_pci_ep_bar *bar, int bar_idx)
+{
+    rt_uint32_t atu_index;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    atu_index = ep->bar_to_atu[bar_idx];
+    __dw_pcie_ep_reset_bar(pci, func_no, bar_idx, ep->epc_bar[bar_idx]->bus.flags);
+
+    dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+    rt_bitmap_clear_bit(ep->ib_window_map, atu_index);
+    ep->epc_bar[bar_idx] = RT_NULL;
+
+    return RT_EOK;
+}
+
+/*
+ * Program BAR @bar_idx of @func_no: route it through an inbound iATU window
+ * to @bar->bus.base, write the size mask through the shadow dbi2 space and
+ * the BAR flags through dbi.  64-bit memory BARs also consume the next slot.
+ */
+static rt_err_t dw_pcie_ep_set_bar(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        struct rt_pci_ep_bar *bar, int bar_idx)
+{
+    rt_err_t err;
+    rt_uint32_t reg;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    rt_size_t size = bar->bus.size;
+    rt_ubase_t flags = bar->bus.flags;
+    enum dw_pcie_aspace_type aspace_type;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = PCIR_BAR(bar_idx) + func_offset;
+
+    /* The BAR space bit selects I/O vs memory decoding */
+    if (!(flags & PCIM_BAR_SPACE))
+    {
+        aspace_type = DW_PCIE_ASPACE_MEM;
+    }
+    else
+    {
+        aspace_type = DW_PCIE_ASPACE_IO;
+    }
+
+    err = dw_pcie_ep_inbound_atu(ep, func_no, bar_idx, bar->bus.base, aspace_type);
+    if (err)
+    {
+        return err;
+    }
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+
+    /* dbi2 holds the writable-bits (size) mask, dbi the live BAR value */
+    dw_pcie_writel_dbi2(pci, reg, rt_lower_32_bits(size - 1));
+    dw_pcie_writel_dbi(pci, reg, flags);
+
+    if (flags & PCIM_BAR_MEM_TYPE_64)
+    {
+        dw_pcie_writel_dbi2(pci, reg + 4, rt_upper_32_bits(size - 1));
+        dw_pcie_writel_dbi(pci, reg + 4, 0);
+    }
+
+    ep->epc_bar[bar_idx] = bar;
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+
+    return RT_EOK;
+}
+
+/* Map a CPU address back to the outbound iATU window programmed for it. */
+static rt_err_t dw_pcie_find_index(struct dw_pcie_ep *ep,
+        rt_ubase_t addr, rt_uint32_t *atu_index)
+{
+    rt_err_t err = -RT_EINVAL;
+
+    for (rt_uint32_t idx = 0; idx < ep->num_ob_windows; ++idx)
+    {
+        if (ep->outbound_addr[idx] == addr)
+        {
+            *atu_index = idx;
+            err = RT_EOK;
+            break;
+        }
+    }
+
+    return err;
+}
+
+/*
+ * Tear down the outbound window previously mapped at CPU address @addr and
+ * return it to the free pool.
+ */
+static rt_err_t dw_pcie_ep_unmap_addr(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        rt_ubase_t addr)
+{
+    rt_err_t err;
+    rt_uint32_t atu_index;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    if ((err = dw_pcie_find_index(ep, addr, &atu_index)))
+    {
+        return err;
+    }
+
+    dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+    rt_bitmap_clear_bit(ep->ob_window_map, atu_index);
+
+    return RT_EOK;
+}
+
+/* Open an outbound window translating CPU address @addr to bus @pci_addr. */
+static rt_err_t dw_pcie_ep_map_addr(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    struct dw_pcie_ep *ep = epc->priv;
+    rt_err_t err = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+
+    if (err)
+    {
+        LOG_E("Failed to enable address error = %s", rt_strerror(err));
+    }
+
+    return err;
+}
+
+/*
+ * Set the Multiple-Message-Capable field of @func_no's MSI capability to
+ * advertise @irq_nr vectors (field encoding per the MSI capability layout).
+ */
+static rt_err_t dw_pcie_ep_set_msi(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        unsigned irq_nr)
+{
+    rt_uint32_t val, reg;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msi_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = ep_func->msi_cap + func_offset + PCIR_MSI_CTRL;
+
+    val = dw_pcie_readw_dbi(pci, reg);
+    val &= ~PCIM_MSICTRL_MMC_MASK;
+    val |= (irq_nr << 1) & PCIM_MSICTRL_MMC_MASK;
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+    dw_pcie_writew_dbi(pci, reg, val);
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+
+    return RT_EOK;
+}
+
+/*
+ * Read back the Multiple-Message-Enable field the host programmed into
+ * @func_no's MSI control register; fails when MSI is not enabled.
+ */
+static rt_err_t dw_pcie_ep_get_msi(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        unsigned *out_irq_nr)
+{
+    rt_uint32_t val, reg;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msi_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = ep_func->msi_cap + func_offset + PCIR_MSI_CTRL;
+
+    val = dw_pcie_readw_dbi(pci, reg);
+    if (!(val & PCIM_MSICTRL_MSI_ENABLE))
+    {
+        return -RT_EINVAL;
+    }
+
+    /* MME field sits at bits [6:4] of the control word */
+    *out_irq_nr = (val & PCIM_MSICTRL_MME_MASK) >> 4;
+
+    return RT_EOK;
+}
+
+/*
+ * Configure @func_no's MSI-X capability: table size, and the BAR/offset
+ * locations of the vector table and of the pending-bit array (placed right
+ * after the table).
+ */
+static rt_err_t dw_pcie_ep_set_msix(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        unsigned irq_nr, int bar_idx, rt_off_t offset)
+{
+    rt_uint32_t val, reg;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msix_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = ep_func->msix_cap + func_offset + PCIR_MSIX_CTRL;
+
+    val = dw_pcie_readw_dbi(pci, reg);
+    val &= ~PCIM_MSIXCTRL_TABLE_SIZE;
+    val |= irq_nr;
+    dw_pcie_writew_dbi(pci, reg, val);
+
+    /* Table location: offset ORed with the BAR indicator bits */
+    reg = ep_func->msix_cap + func_offset + PCIR_MSIX_TABLE;
+    val = offset | bar_idx;
+    dw_pcie_writel_dbi(pci, reg, val);
+
+    /* PBA immediately follows the vector table */
+    reg = ep_func->msix_cap + func_offset + PCIR_MSIX_PBA;
+    val = (offset + (irq_nr * PCIM_MSIX_ENTRY_SIZE)) | bar_idx;
+    dw_pcie_writel_dbi(pci, reg, val);
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+
+    return RT_EOK;
+}
+
+/*
+ * Read back @func_no's MSI-X table size field; fails when MSI-X is not
+ * enabled by the host.
+ */
+static rt_err_t dw_pcie_ep_get_msix(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        unsigned *out_irq_nr)
+{
+    rt_uint32_t val, reg;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msix_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = ep_func->msix_cap + func_offset + PCIR_MSIX_CTRL;
+
+    val = dw_pcie_readw_dbi(pci, reg);
+    if (!(val & PCIM_MSIXCTRL_MSIX_ENABLE))
+    {
+        return -RT_EINVAL;
+    }
+
+    *out_irq_nr = val & PCIM_MSIXCTRL_TABLE_SIZE;
+
+    return RT_EOK;
+}
+
+/* Forward an interrupt-raise request to the SoC-specific hook, if any. */
+static rt_err_t dw_pcie_ep_raise_irq(struct rt_pci_ep *epc, rt_uint8_t func_no,
+        enum rt_pci_ep_irq type, unsigned irq)
+{
+    struct dw_pcie_ep *ep = epc->priv;
+
+    return ep->ops->raise_irq ? ep->ops->raise_irq(ep, func_no, type, irq) : -RT_ENOSYS;
+}
+
+static rt_err_t dw_pcie_ep_stop(struct rt_pci_ep *epc)
+{
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    if (pci->ops->stop_link)
+    {
+        pci->ops->stop_link(pci);
+    }
+
+    return RT_EOK;
+}
+
+static rt_err_t dw_pcie_ep_start(struct rt_pci_ep *epc)
+{
+    struct dw_pcie_ep *ep = epc->priv;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    if (pci->ops->start_link)
+    {
+        return pci->ops->start_link(pci);
+    }
+
+    return RT_EOK;
+}
+
+/* Callbacks plugged into the generic rt_pci_ep framework */
+static const struct rt_pci_ep_ops dw_pcie_ep_ops =
+{
+    .write_header   = dw_pcie_ep_write_header,
+    .set_bar        = dw_pcie_ep_set_bar,
+    .clear_bar      = dw_pcie_ep_clear_bar,
+    .map_addr       = dw_pcie_ep_map_addr,
+    .unmap_addr     = dw_pcie_ep_unmap_addr,
+    .set_msi        = dw_pcie_ep_set_msi,
+    .get_msi        = dw_pcie_ep_get_msi,
+    .set_msix       = dw_pcie_ep_set_msix,
+    .get_msix       = dw_pcie_ep_get_msix,
+    .raise_irq      = dw_pcie_ep_raise_irq,
+    .start          = dw_pcie_ep_start,
+    .stop           = dw_pcie_ep_stop,
+};
+
+/* The DWC EP core provides no mechanism to fire legacy INTx from software;
+ * always reject the request. */
+rt_err_t dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no)
+{
+    LOG_E("EP cannot trigger legacy IRQs");
+
+    return -RT_EINVAL;
+}
+
+/*
+ * Raise MSI vector @irq (1-based) for @func_no: read the message address
+ * and data the host wrote into our MSI capability, temporarily map the
+ * (page-aligned) message address through an outbound window and write the
+ * message data to it.
+ */
+rt_err_t dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        unsigned irq)
+{
+    rt_err_t err;
+    rt_off_t aligned_offset;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    rt_uint64_t msg_addr;
+    rt_uint16_t msg_ctrl, msg_data;
+    rt_uint32_t msg_addr_lower, msg_addr_upper, reg;
+    struct rt_pci_ep *epc = ep->epc;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msi_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+    /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
+    reg = ep_func->msi_cap + func_offset + PCIR_MSI_CTRL;
+    msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+    reg = ep_func->msi_cap + func_offset + PCIR_MSI_ADDR;
+    msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+
+    /* The data register moves when the capability carries a 64-bit address */
+    if (!!(msg_ctrl & PCIM_MSICTRL_64BIT))
+    {
+        reg = ep_func->msi_cap + func_offset + PCIR_MSI_ADDR_HIGH;
+        msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+        reg = ep_func->msi_cap + func_offset + PCIR_MSI_DATA_64BIT;
+        msg_data = dw_pcie_readw_dbi(pci, reg);
+    }
+    else
+    {
+        msg_addr_upper = 0;
+        reg = ep_func->msi_cap + func_offset + PCIR_MSI_DATA;
+        msg_data = dw_pcie_readw_dbi(pci, reg);
+    }
+
+    /* Map the page containing the message address; write at the offset */
+    aligned_offset = msg_addr_lower & (ep->page_size - 1);
+    msg_addr = ((rt_uint64_t)msg_addr_upper << 32) | (msg_addr_lower & ~aligned_offset);
+
+    if ((err = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phy, msg_addr, ep->page_size)))
+    {
+        return err;
+    }
+
+    HWREG32(ep->msi_mem + aligned_offset) = msg_data | (irq - 1);
+    dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phy);
+
+    return RT_EOK;
+}
+
+/*
+ * Raise MSI-X vector @irq (1-based) via the controller's doorbell register:
+ * writing (function | vector) to PCIE_MSIX_DOORBELL lets the core generate
+ * the message itself, without mapping the MSI-X table through an outbound
+ * window.
+ */
+rt_err_t dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        unsigned irq)
+{
+    rt_uint32_t msg_data;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msix_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) | (irq - 1);
+    dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
+
+    return RT_EOK;
+}
+
+/*
+ * Raise MSI-X vector @irq (1-based) for @func_no: locate our own MSI-X
+ * table (host-visible through one of the BARs), read the entry's message
+ * address/data, then map the message address through a temporary outbound
+ * window and write the message data.
+ */
+rt_err_t dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, rt_uint8_t func_no,
+        unsigned irq)
+{
+    rt_err_t err;
+    int bar_idx;
+    rt_uint64_t msg_addr;
+    rt_uint32_t tbl_offset;
+    rt_off_t aligned_offset;
+    /* rt_off_t, not rt_uint8_t: per-function offsets may exceed 255 */
+    rt_off_t func_offset = 0;
+    rt_uint32_t reg, msg_data, vec_ctrl;
+    struct rt_pci_ep *epc = ep->epc;
+    struct rt_pci_ep_msix_tbl *msix_tbl;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+    if (!ep_func || !ep_func->msix_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    func_offset = dw_pcie_ep_func_select(ep, func_no);
+    reg = ep_func->msix_cap + func_offset + PCIR_MSIX_TABLE;
+
+    /* BAR indicator in the low bits, table offset above */
+    tbl_offset = dw_pcie_readl_dbi(pci, reg);
+    bar_idx = (tbl_offset & PCIM_MSIX_BIR_MASK);
+    tbl_offset &= PCIM_MSIX_TABLE_OFFSET;
+
+    msix_tbl = (void *)ep->epc_bar[bar_idx]->cpu_addr + tbl_offset;
+    msg_addr = msix_tbl[(irq - 1)].msg_addr;
+    msg_data = msix_tbl[(irq - 1)].msg_data;
+    vec_ctrl = msix_tbl[(irq - 1)].vector_ctrl;
+
+    if (vec_ctrl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK)
+    {
+        /* Vector is masked: must not send the message */
+        return -RT_EINVAL;
+    }
+
+    /*
+     * Map the page-aligned base of the message address and write at the
+     * offset within the page.  Previously the unaligned address was mapped
+     * while the write still added aligned_offset, hitting the wrong bus
+     * address (compare dw_pcie_ep_raise_msi_irq()).
+     */
+    aligned_offset = msg_addr & (ep->page_size - 1);
+    msg_addr &= ~(rt_uint64_t)aligned_offset;
+
+    if ((err = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phy, msg_addr, ep->page_size)))
+    {
+        return err;
+    }
+
+    HWREG32(ep->msi_mem + aligned_offset) = msg_data;
+    dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phy);
+
+    return RT_EOK;
+}
+
+/*
+ * Release everything dw_pcie_ep_init() allocated.  Freed pointers are
+ * cleared so the function is idempotent - it doubles as the init error
+ * path and may run again on a later teardown.
+ */
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+    struct rt_pci_ep *epc = ep->epc;
+
+    if (ep->msi_mem)
+    {
+        rt_pci_ep_mem_free(epc, ep->msi_mem, ep->msi_mem_phy, ep->page_size);
+        ep->msi_mem = RT_NULL;
+    }
+
+    if (!rt_list_isempty(&ep->func_nodes))
+    {
+        struct dw_pcie_ep_func *ep_func, *ep_func_next;
+
+        rt_list_for_each_entry_safe(ep_func, ep_func_next, &ep->func_nodes, list)
+        {
+            rt_list_remove(&ep_func->list);
+            rt_free(ep_func);
+        }
+    }
+
+    if (ep->ib_window_map)
+    {
+        rt_free(ep->ib_window_map);
+        ep->ib_window_map = RT_NULL;
+    }
+
+    if (ep->ob_window_map)
+    {
+        rt_free(ep->ob_window_map);
+        ep->ob_window_map = RT_NULL;
+    }
+
+    if (ep->outbound_addr)
+    {
+        rt_free(ep->outbound_addr);
+        ep->outbound_addr = RT_NULL;
+    }
+
+    if (epc)
+    {
+        /* NOTE(review): epc may have been registered via rt_pci_ep_register()
+         * but is freed without a matching unregister - confirm the framework
+         * keeps no dangling reference. */
+        rt_free(epc);
+        ep->epc = RT_NULL;
+    }
+}
+
+/*
+ * Walk our own PCIe extended capability list (it starts right after the
+ * legacy config space, at offset PCI_REGMAX + 1 = 0x100) through DBI reads.
+ * Returns the capability offset, or 0 when @cap is absent.
+ */
+static rt_uint32_t dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+    rt_uint32_t header;
+    int pos = (PCI_REGMAX + 1);
+
+    while (pos)
+    {
+        header = dw_pcie_readl_dbi(pci, pos);
+
+        if (PCI_EXTCAP_ID(header) == cap)
+        {
+            return pos;
+        }
+
+        /* A zero next-pointer terminates the list */
+        if (!(pos = PCI_EXTCAP_NEXTPTR(header)))
+        {
+            break;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Final endpoint configuration: verify the core is strapped for EP mode,
+ * zero the Resizable-BAR capabilities (if present) so only the sizes set
+ * via set_bar() are advertised, and run the common link setup.
+ */
+rt_err_t dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+    rt_off_t offset;
+    rt_size_t bar_nr;
+    rt_uint32_t reg;
+    rt_uint8_t hdr_type;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    /* An EP must expose a type-0 (normal) config header */
+    hdr_type = dw_pcie_readb_dbi(pci, PCIR_HDRTYPE) & PCIM_HDRTYPE;
+    if (hdr_type != PCIM_HDRTYPE_NORMAL)
+    {
+        LOG_E("PCIe controller is not set to EP mode hdr_type = %x", hdr_type);
+        return -RT_EIO;
+    }
+
+    offset = dw_pcie_ep_find_ext_capability(pci, PCIZ_RESIZE_BAR);
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+
+    if (offset)
+    {
+        reg = dw_pcie_readl_dbi(pci, offset + PCIM_REBAR_CTRL);
+        bar_nr = (reg & PCIM_REBAR_CTRL_NBAR_MASK) >> PCIM_REBAR_CTRL_NBAR_SHIFT;
+
+        /* Clear the supported-sizes mask of every resizable BAR entry */
+        for (int i = 0; i < bar_nr; ++i, offset += PCIM_REBAR_CTRL)
+        {
+            dw_pcie_writel_dbi(pci, offset + PCIM_REBAR_CAP, 0x0);
+        }
+    }
+
+    dw_pcie_setup(pci);
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+
+    return RT_EOK;
+}
+
+/*
+ * Bring up endpoint mode: read iATU window counts from the devicetree,
+ * allocate window bookkeeping, register the generic EP controller, cache
+ * each function's MSI/MSI-X capability offsets and reserve one page of
+ * outbound space for raising interrupts.  All failures funnel through
+ * dw_pcie_ep_exit() to release whatever was already allocated.
+ */
+rt_err_t dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+    rt_err_t err;
+    struct rt_pci_ep *epc = RT_NULL;
+    struct dw_pcie_ep_func *ep_func;
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+    struct rt_device *dev = pci->dev;
+
+    rt_list_init(&ep->func_nodes);
+
+    if (!pci->dbi_base || !pci->dbi_base2)
+    {
+        LOG_E("dbi_base/dbi_base2 is not populated");
+        return -RT_EINVAL;
+    }
+
+    if ((err = rt_dm_dev_prop_read_u32(dev, "num-ib-windows", &ep->num_ib_windows)))
+    {
+        LOG_E("Unable to read 'num-ib-windows' property");
+        return err;
+    }
+
+    if (ep->num_ib_windows > MAX_IATU_IN)
+    {
+        LOG_E("Invalid 'num-ib-windows'");
+        return -RT_EINVAL;
+    }
+
+    if ((err = rt_dm_dev_prop_read_u32(dev, "num-ob-windows", &ep->num_ob_windows)))
+    {
+        LOG_E("Unable to read 'num-ob-windows' property");
+        return err;
+    }
+
+    if (ep->num_ob_windows > MAX_IATU_OUT)
+    {
+        LOG_E("Invalid 'num-ob-windows'");
+        return -RT_EINVAL;
+    }
+
+    ep->ib_window_map = rt_calloc(RT_BITMAP_LEN(ep->num_ib_windows), sizeof(rt_bitmap_t));
+    if (!ep->ib_window_map)
+    {
+        return -RT_ENOMEM;
+    }
+
+    ep->ob_window_map = rt_calloc(RT_BITMAP_LEN(ep->num_ob_windows), sizeof(rt_bitmap_t));
+    if (!ep->ob_window_map)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    ep->outbound_addr = rt_calloc(ep->num_ob_windows, sizeof(rt_ubase_t));
+    if (!ep->outbound_addr)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    if (pci->link_gen < 1)
+    {
+        /* -1 = "no limit" unless the devicetree caps the link speed */
+        pci->link_gen = -1;
+        rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
+    }
+
+    epc = rt_calloc(1, sizeof(*epc));
+    if (!epc)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    epc->name = rt_dm_dev_get_name(dev);
+    epc->rc_dev = dev;
+    epc->ops = &dw_pcie_ep_ops;
+    epc->priv = ep;
+
+    /*
+     * Publish the controller before registering it so that the _fail path
+     * (dw_pcie_ep_exit) can free it: previously a failed register leaked
+     * epc because ep->epc was only assigned on success.
+     */
+    ep->epc = epc;
+
+    if ((err = rt_pci_ep_register(epc)))
+    {
+        goto _fail;
+    }
+
+    if (rt_dm_dev_prop_read_u8(dev, "max-functions", &epc->max_functions))
+    {
+        epc->max_functions = 1;
+    }
+
+    for (rt_uint8_t func_no = 0; func_no < epc->max_functions; ++func_no)
+    {
+        ep_func = rt_calloc(1, sizeof(*ep_func));
+
+        if (!ep_func)
+        {
+            err = -RT_ENOMEM;
+            goto _fail;
+        }
+
+        /* Cache MSI/MSI-X capability offsets for the raise_irq paths */
+        ep_func->func_no = func_no;
+        ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no, PCIY_MSI);
+        ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no, PCIY_MSIX);
+
+        rt_list_init(&ep_func->list);
+        rt_list_insert_after(&ep->func_nodes, &ep_func->list);
+    }
+
+    if (ep->ops->ep_init)
+    {
+        ep->ops->ep_init(ep);
+    }
+
+    if ((err = rt_pci_ep_mem_init(epc, ep->aspace, ep->aspace_size, ep->page_size)))
+    {
+        goto _fail;
+    }
+
+    /* One page reserved for writing peer MSI/MSI-X messages */
+    ep->msi_mem = rt_pci_ep_mem_alloc(epc, &ep->msi_mem_phy, ep->page_size);
+
+    if (!ep->msi_mem)
+    {
+        LOG_E("Failed to reserve memory for MSI/MSI-X");
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    if ((err = dw_pcie_ep_init_complete(ep)))
+    {
+        goto _fail;
+    }
+
+    return RT_EOK;
+
+_fail:
+    dw_pcie_ep_exit(ep);
+
+    return err;
+}
diff --git a/components/drivers/pci/host/dw/pcie-dw_host.c b/components/drivers/pci/host/dw/pcie-dw_host.c
new file mode 100644
index 00000000000..05a3dc77799
--- /dev/null
+++ b/components/drivers/pci/host/dw/pcie-dw_host.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-23     GuEe-GUI     first version
+ */
+
+#define DBG_TAG "pcie.dw-host"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "pcie-dw.h"
+
+/* Acknowledge one MSI vector by clearing its pending bit (write-1-to-clear). */
+static void dw_pcie_irq_ack(struct rt_pic_irq *pirq)
+{
+    struct dw_pcie_port *port = pirq->pic->priv_data;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    int hwirq = pirq->hwirq;
+    rt_uint32_t ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
+    rt_uint32_t bit = hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+    /* Each MSI controller block owns MAX_MSI_IRQS_PER_CTRL vectors */
+    dw_pcie_writel_dbi(pci,
+            PCIE_MSI_INTR0_STATUS + ctrl * MSI_REG_CTRL_BLOCK_SIZE, RT_BIT(bit));
+}
+
+/* Mask one MSI vector: first at the PCI device, then in the controller. */
+static void dw_pcie_irq_mask(struct rt_pic_irq *pirq)
+{
+    rt_ubase_t level;
+    struct dw_pcie_port *port = pirq->pic->priv_data;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    int hwirq = pirq->hwirq;
+    rt_uint32_t ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
+    rt_uint32_t res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+    rt_uint32_t bit = hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+    rt_pci_msi_mask_irq(pirq);
+
+    /* The cached irq_mask[] must stay in sync with the register, hence the lock */
+    level = rt_spin_lock_irqsave(&port->lock);
+
+    port->irq_mask[ctrl] |= RT_BIT(bit);
+    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);
+
+    rt_spin_unlock_irqrestore(&port->lock, level);
+}
+
+/* Unmask one MSI vector: mirror of dw_pcie_irq_mask(). */
+static void dw_pcie_irq_unmask(struct rt_pic_irq *pirq)
+{
+    rt_ubase_t level;
+    struct dw_pcie_port *port = pirq->pic->priv_data;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    int hwirq = pirq->hwirq;
+    rt_uint32_t ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
+    rt_uint32_t res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+    rt_uint32_t bit = hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+    rt_pci_msi_unmask_irq(pirq);
+
+    /* Clear the cached mask bit and write the whole word back, under the lock */
+    level = rt_spin_lock_irqsave(&port->lock);
+
+    port->irq_mask[ctrl] &= ~RT_BIT(bit);
+    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);
+
+    rt_spin_unlock_irqrestore(&port->lock, level);
+}
+
+/* Build the MSI message: doorbell address = msi_data_phy, payload = hwirq. */
+static void dw_pcie_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
+{
+    struct dw_pcie_port *port = pirq->pic->priv_data;
+    rt_uint64_t msi_target = (rt_uint64_t)port->msi_data_phy;
+
+    msg->address_lo = rt_lower_32_bits(msi_target);
+    msg->address_hi = rt_upper_32_bits(msi_target);
+
+    /* The controller demultiplexes vectors purely by the written data value */
+    msg->data = pirq->hwirq;
+}
+
+/* Allocate one MSI vector from the port's bitmap; -RT_EEMPTY when exhausted. */
+static int dw_pcie_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
+{
+    int irq, hwirq;
+    rt_ubase_t level;
+    struct rt_pic_irq *pirq;
+    struct dw_pcie_port *port = pic->priv_data;
+
+    level = rt_spin_lock_irqsave(&port->lock);
+
+    /* First free bit in the map is the next free hardware vector */
+    hwirq = rt_bitmap_next_clear_bit(port->msi_map, 0, port->irq_count);
+
+    if (hwirq < port->irq_count)
+    {
+        pirq = rt_pic_find_irq(pic, hwirq);
+
+        irq = rt_pic_config_irq(pic, hwirq, hwirq);
+        /* MSI writes are edge events */
+        pirq->mode = RT_IRQ_MODE_EDGE_RISING;
+
+        rt_bitmap_set_bit(port->msi_map, hwirq);
+    }
+    else
+    {
+        irq = -RT_EEMPTY;
+    }
+
+    rt_spin_unlock_irqrestore(&port->lock, level);
+
+    return irq;
+}
+
+/* Return a previously allocated MSI vector to the bitmap. */
+static void dw_pcie_irq_free_msi(struct rt_pic *pic, int irq)
+{
+    rt_ubase_t level;
+    struct dw_pcie_port *port = pic->priv_data;
+    struct rt_pic_irq *pirq = rt_pic_find_pirq(pic, irq);
+
+    /* Ignore IRQs this PIC does not know about */
+    if (!pirq)
+    {
+        return;
+    }
+
+    level = rt_spin_lock_irqsave(&port->lock);
+    rt_bitmap_clear_bit(port->msi_map, pirq->hwirq);
+    rt_spin_unlock_irqrestore(&port->lock, level);
+}
+
+/*
+ * PIC operations for the DWC core's integrated MSI controller.
+ * Vectors are edge-triggered (see dw_pcie_irq_alloc_msi) and are all
+ * demultiplexed from the single parent msi_irq line in dw_handle_msi_irq().
+ */
+const static struct rt_pic_ops dw_pci_msi_ops =
+{
+    .name = "DWPCI-MSI",
+    .irq_ack = dw_pcie_irq_ack,
+    .irq_mask = dw_pcie_irq_mask,
+    .irq_unmask = dw_pcie_irq_unmask,
+    .irq_compose_msi_msg = dw_pcie_compose_msi_msg,
+    .irq_alloc_msi = dw_pcie_irq_alloc_msi,
+    .irq_free_msi = dw_pcie_irq_free_msi,
+    .flags = RT_PIC_F_IRQ_ROUTING,
+};
+
+/*
+ * MSI interrupt handler: scan every controller block's STATUS register
+ * and dispatch each pending vector. Returns RT_EOK if at least one vector
+ * was serviced, -RT_EEMPTY if the interrupt was spurious.
+ */
+rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port)
+{
+    int pos;
+    rt_bitmap_t status;
+    struct rt_pic_irq *pirq;
+    rt_err_t err = -RT_EEMPTY;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    struct rt_pic *msi_pic = port->msi_pic;
+    rt_uint32_t num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);
+
+    for (int i = 0; i < num_ctrls; ++i)
+    {
+        status = dw_pcie_readl_dbi(pci,
+                PCIE_MSI_INTR0_STATUS + (i * MSI_REG_CTRL_BLOCK_SIZE));
+
+        if (!status)
+        {
+            continue;
+        }
+
+        /* At least one vector pending in this block */
+        err = RT_EOK;
+
+        rt_bitmap_for_each_set_bit(&status, pos, MAX_MSI_IRQS_PER_CTRL)
+        {
+            pirq = rt_pic_find_irq(msi_pic, i * MAX_MSI_IRQS_PER_CTRL + pos);
+
+            /* Ack (W1C) before handling so a new edge is not lost */
+            dw_pcie_irq_ack(pirq);
+            rt_pic_handle_isr(pirq);
+        }
+    }
+
+    return err;
+}
+
+/* Chained ISR attached to the parent MSI interrupt line. */
+static void dw_pcie_msi_isr(int irqno, void *param)
+{
+    dw_handle_msi_irq((struct dw_pcie_port *)param);
+}
+
+/* Release the built-in MSI resources: the parent IRQ and the doorbell buffer. */
+void dw_pcie_free_msi(struct dw_pcie_port *port)
+{
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+
+    if (port->msi_irq >= 0)
+    {
+        rt_hw_interrupt_mask(port->msi_irq);
+        rt_pic_detach_irq(port->msi_irq, port);
+    }
+
+    if (port->msi_data)
+    {
+        /* Doorbell buffer was allocated DMA-coherent in dw_pcie_host_init() */
+        rt_dma_free_coherent(pci->dev, sizeof(rt_uint64_t), port->msi_data,
+                port->msi_data_phy);
+    }
+}
+
+/*
+ * Program the MSI doorbell address (msi_data_phy) into the RC so endpoint
+ * memory writes to it are detected as MSI. Compiled out without RT_PCI_MSI.
+ */
+void dw_pcie_msi_init(struct dw_pcie_port *port)
+{
+#ifdef RT_PCI_MSI
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    rt_uint64_t msi_target = (rt_uint64_t)port->msi_data_phy;
+
+    /* Program the msi_data_phy */
+    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, rt_lower_32_bits(msi_target));
+    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, rt_upper_32_bits(msi_target));
+#endif
+}
+
+static const struct rt_pci_ops dw_child_pcie_ops;
+static const struct rt_pci_ops dw_pcie_ops;
+
+/**
+ * Bring up a DesignWare PCIe root complex.
+ *
+ * Maps the "config" window, allocates and initializes the PCI host bridge,
+ * walks the bridge bus regions to locate the I/O and DBI spaces, reads the
+ * optional "num-viewport"/"max-link-speed" properties, sets up MSI (either
+ * the built-in PIC or the platform's msi_host_init hook), and finally
+ * probes the bridge.
+ *
+ * @param port the port embedded in struct dw_pcie
+ *
+ * @return RT_EOK on success, negative error code otherwise.
+ *
+ * Fix vs. the original: several failure paths after the bridge was
+ * allocated returned directly and leaked it (and left port->bridge set);
+ * they now all go through _err_free_bridge.
+ */
+rt_err_t dw_pcie_host_init(struct dw_pcie_port *port)
+{
+    rt_err_t err;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    struct rt_device *dev = pci->dev;
+    struct rt_pci_host_bridge *bridge;
+
+    rt_spin_lock_init(&port->lock);
+
+    rt_dm_dev_get_address_by_name(dev, "config", &port->cfg0_addr, &port->cfg0_size);
+
+    if (port->cfg0_addr)
+    {
+        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);
+
+        if (!port->cfg0_base)
+        {
+            return -RT_EIO;
+        }
+    }
+    else if (!port->cfg0_base)
+    {
+        /* May still be recovered from a PCI_BUS_REGION_F_NONE region below */
+        LOG_E("Missing 'config' reg space");
+    }
+
+    if (!(bridge = rt_pci_host_bridge_alloc(0)))
+    {
+        return -RT_ENOMEM;
+    }
+
+    bridge->parent.ofw_node = dev->ofw_node;
+
+    if ((err = rt_pci_host_bridge_init(bridge)))
+    {
+        goto _err_free_bridge;
+    }
+
+    port->bridge = bridge;
+
+    for (int i = 0; i < bridge->bus_regions_nr; ++i)
+    {
+        struct rt_pci_bus_region *region = &bridge->bus_regions[i];
+
+        switch (region->flags)
+        {
+        case PCI_BUS_REGION_F_IO:
+            port->io_addr = region->cpu_addr;
+            port->io_bus_addr = region->phy_addr;
+            port->io_size = region->size;
+            break;
+
+        case PCI_BUS_REGION_F_NONE:
+            /* Untyped region doubles as config/DBI space */
+            port->cfg0_size = region->size;
+            port->cfg0_addr = region->cpu_addr;
+
+            if (!pci->dbi_base)
+            {
+                pci->dbi_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);
+
+                if (!pci->dbi_base)
+                {
+                    LOG_E("Error with ioremap");
+                    /* Fix: release the bridge instead of leaking it */
+                    err = -RT_ENOMEM;
+                    goto _err_free_bridge;
+                }
+            }
+            break;
+
+        default:
+            break;
+        }
+    }
+
+    if (!port->cfg0_base && port->cfg0_addr)
+    {
+        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);
+
+        if (!port->cfg0_base)
+        {
+            /* Fix: release the bridge instead of leaking it */
+            err = -RT_ENOMEM;
+            goto _err_free_bridge;
+        }
+    }
+
+    if (rt_dm_dev_prop_read_u32(dev, "num-viewport", &pci->num_viewport))
+    {
+        pci->num_viewport = 2;
+    }
+
+    if (pci->link_gen < 1)
+    {
+        pci->link_gen = -1;
+        rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
+    }
+
+    /*
+     * If a specific SoC driver needs to change the default number of vectors,
+     * it needs to implement the set_irq_count callback.
+     */
+    if (!port->ops->set_irq_count)
+    {
+        port->irq_count = MSI_DEF_NUM_VECTORS;
+    }
+    else
+    {
+        port->ops->set_irq_count(port);
+
+        if (port->irq_count > MAX_MSI_IRQS || port->irq_count == 0)
+        {
+            LOG_E("Invalid count of irq = %d", port->irq_count);
+
+            /* Fix: release the bridge instead of leaking it */
+            err = -RT_EINVAL;
+            goto _err_free_bridge;
+        }
+    }
+
+    if (!port->ops->msi_host_init)
+    {
+        port->msi_pic = rt_calloc(1, sizeof(*port->msi_pic));
+
+        if (!port->msi_pic)
+        {
+            /* Fix: release the bridge instead of leaking it */
+            err = -RT_ENOMEM;
+            goto _err_free_bridge;
+        }
+
+        port->msi_pic->priv_data = port;
+        port->msi_pic->ops = &dw_pci_msi_ops;
+        rt_pic_linear_irq(port->msi_pic, port->irq_count);
+        rt_pic_user_extends(port->msi_pic);
+
+        if (port->msi_irq)
+        {
+            rt_hw_interrupt_install(port->msi_irq, dw_pcie_msi_isr, port, "dwc-pci-msi");
+            rt_hw_interrupt_umask(port->msi_irq);
+        }
+
+        /* Doorbell buffer endpoints write to in order to raise an MSI */
+        port->msi_data = rt_dma_alloc_coherent(pci->dev, sizeof(rt_uint64_t),
+                &port->msi_data_phy);
+
+        if (!port->msi_data)
+        {
+            err = -RT_ENOMEM;
+            goto _err_free_msi;
+        }
+    }
+    else
+    {
+        if ((err = port->ops->msi_host_init(port)))
+        {
+            /* Fix: release the bridge instead of leaking it */
+            goto _err_free_bridge;
+        }
+    }
+
+    /* Set default bus ops */
+    bridge->ops = &dw_pcie_ops;
+    bridge->child_ops = &dw_child_pcie_ops;
+
+    if (port->ops->host_init && (err = port->ops->host_init(port)))
+    {
+        goto _err_free_msi;
+    }
+
+    bridge->sysdata = port;
+
+    if ((err = rt_pci_host_bridge_probe(bridge)))
+    {
+        goto _err_free_msi;
+    }
+
+    return RT_EOK;
+
+_err_free_msi:
+    if (!port->ops->msi_host_init)
+    {
+        dw_pcie_free_msi(port);
+
+        rt_pic_cancel_irq(port->msi_pic);
+        rt_free(port->msi_pic);
+        port->msi_pic = RT_NULL;
+    }
+
+_err_free_bridge:
+    rt_pci_host_bridge_free(bridge);
+    port->bridge = RT_NULL;
+
+    return err;
+}
+
+/* Undo dw_pcie_host_init()'s MSI setup (only when we own the MSI machinery). */
+void dw_pcie_host_deinit(struct dw_pcie_port *port)
+{
+    if (port->ops->msi_host_init)
+    {
+        /* Platform-provided MSI: nothing of ours to free */
+        return;
+    }
+
+    dw_pcie_free_msi(port);
+}
+
+/* Final teardown: MSI resources (if self-managed) and the host bridge. */
+void dw_pcie_host_free(struct dw_pcie_port *port)
+{
+    if (!port->ops->msi_host_init)
+    {
+        dw_pcie_free_msi(port);
+
+        /* Cancel and release the PIC created in dw_pcie_host_init() */
+        rt_pic_cancel_irq(port->msi_pic);
+        rt_free(port->msi_pic);
+    }
+
+    if (port->bridge)
+    {
+        rt_pci_host_bridge_free(port->bridge);
+    }
+}
+
+/*
+ * Map a child-bus config access: retarget outbound ATU region 0 at the
+ * requested bus/device/function and return a pointer into the cfg0 window.
+ */
+static void *dw_pcie_other_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
+{
+    int type;
+    rt_uint32_t busdev;
+    struct dw_pcie_port *port = bus->sysdata;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+
+    /*
+     * Checking whether the link is up here is a last line of defense
+     * against platforms that forward errors on the system bus as
+     * SError upon PCI configuration transactions issued when the link is
+     * down. The check is racy by definition and does not stop the system
+     * from triggering an SError if the link drops right after it.
+     */
+    if (!dw_pcie_link_up(pci))
+    {
+        return RT_NULL;
+    }
+
+    busdev = PCIE_ATU_BUS(bus->number) |
+             PCIE_ATU_DEV(RT_PCI_SLOT(devfn)) |
+             PCIE_ATU_FUNC(RT_PCI_FUNC(devfn));
+
+    /* CFG0 for the bus directly below the root port, CFG1 for buses beyond */
+    type = rt_pci_is_root_bus(bus->parent) ? PCIE_ATU_TYPE_CFG0 : PCIE_ATU_TYPE_CFG1;
+
+    dw_pcie_prog_outbound_atu(pci, 0, type, port->cfg0_addr, busdev, port->cfg0_size);
+
+    return port->cfg0_base + reg;
+}
+
+/* Child-bus config read; re-point the shared ATU window back at I/O after. */
+static rt_err_t dw_pcie_other_read_conf(struct rt_pci_bus *bus,
+            rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
+{
+    struct dw_pcie_port *port = bus->sysdata;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    rt_err_t err = rt_pci_bus_read_config_uxx(bus, devfn, reg, width, value);
+
+    /*
+     * When config and I/O share the last viewport, map() above stole the
+     * window for this config access; restore it to I/O space now.
+     */
+    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
+    {
+        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+                port->io_addr, port->io_bus_addr, port->io_size);
+    }
+
+    return err;
+}
+
+/* Child-bus config write; re-point the shared ATU window back at I/O after. */
+static rt_err_t dw_pcie_other_write_conf(struct rt_pci_bus *bus,
+            rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
+{
+    struct dw_pcie_port *port = bus->sysdata;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+    rt_err_t err = rt_pci_bus_write_config_uxx(bus, devfn, reg, width, value);
+
+    /*
+     * When config and I/O share the last viewport, map() above stole the
+     * window for this config access; restore it to I/O space now.
+     */
+    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
+    {
+        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+                port->io_addr, port->io_bus_addr, port->io_size);
+    }
+
+    return err;
+}
+
+/* Config accessors for buses below the root: every access goes through the
+ * shared cfg0 window, retargeted per device by dw_pcie_other_conf_map(). */
+static const struct rt_pci_ops dw_child_pcie_ops =
+{
+    .map = dw_pcie_other_conf_map,
+    .read = dw_pcie_other_read_conf,
+    .write = dw_pcie_other_write_conf,
+};
+
+/* Map a root-bus config access straight onto the DBI space (device 0 only). */
+void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
+{
+    struct dw_pcie_port *port = bus->sysdata;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+
+    /* Only device 0 exists on the root bus */
+    return RT_PCI_SLOT(devfn) > 0 ? RT_NULL : pci->dbi_base + reg;
+}
+
+/* Config accessors for the root bus itself: reads/writes land in DBI space. */
+static const struct rt_pci_ops dw_pcie_ops =
+{
+    .map = dw_pcie_own_conf_map,
+    .read = rt_pci_bus_read_config_uxx,
+    .write = rt_pci_bus_write_config_uxx,
+};
+
+/*
+ * Program the root-complex side of the DWC core: MSI controller masks,
+ * RC BARs, interrupt pin, bus numbers, command register and the outbound
+ * iATU windows for memory/I-O regions. Register write order follows the
+ * reference (Linux) driver; do not reorder.
+ */
+void dw_pcie_setup_rc(struct dw_pcie_port *port)
+{
+    rt_uint32_t val, num_ctrls;
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+
+    /*
+     * Enable DBI read-only registers for writing/updating configuration.
+     * Write permission gets disabled towards the end of this function.
+     */
+    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);
+
+    dw_pcie_setup(pci);
+
+    if (!port->ops->msi_host_init)
+    {
+        num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);
+
+        /* Initialize IRQ Status array: all vectors masked but enabled */
+        for (int ctrl = 0; ctrl < num_ctrls; ++ctrl)
+        {
+            port->irq_mask[ctrl] = ~0;
+
+            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), port->irq_mask[ctrl]);
+            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ~0);
+        }
+    }
+
+    /* Setup RC BARs */
+    dw_pcie_writel_dbi(pci, PCIR_BAR(0), PCIM_BAR_MEM_TYPE_64);
+    dw_pcie_writel_dbi(pci, PCIR_BAR(1), PCIM_BAR_MEM_TYPE_32);
+
+    /* Setup interrupt pins: interrupt pin field = 1 (INTA) */
+    val = dw_pcie_readl_dbi(pci, PCIR_INTLINE);
+    val &= 0xffff00ff;
+    val |= 0x00000100;
+    dw_pcie_writel_dbi(pci, PCIR_INTLINE, val);
+
+    /* Setup bus numbers: primary 0, secondary 1, subordinate 0xff */
+    val = dw_pcie_readl_dbi(pci, PCIR_PRIBUS_1);
+    val &= 0xff000000;
+    val |= 0x00ff0100;
+    dw_pcie_writel_dbi(pci, PCIR_PRIBUS_1, val);
+
+    /* Setup command register: enable I/O, memory, bus mastering, SERR */
+    val = dw_pcie_readl_dbi(pci, PCIR_COMMAND);
+    val &= 0xffff0000;
+    val |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN;
+    dw_pcie_writel_dbi(pci, PCIR_COMMAND, val);
+
+    /*
+     * If the platform provides its own child bus config accesses, it means
+     * the platform uses its own address translation component rather than
+     * ATU, so we should not program the ATU here.
+     */
+    if (pci->port.bridge->child_ops == &dw_child_pcie_ops)
+    {
+        int atu_idx = 0;
+        struct rt_pci_host_bridge *bridge = port->bridge;
+
+        /* Get last memory resource entry */
+        for (int i = 0; i < bridge->bus_regions_nr; ++i)
+        {
+            struct rt_pci_bus_region *region = &bridge->bus_regions[i];
+
+            if (region->flags != PCI_BUS_REGION_F_MEM)
+            {
+                continue;
+            }
+
+            /* Viewport 0 is reserved for config accesses */
+            if (pci->num_viewport <= ++atu_idx)
+            {
+                break;
+            }
+
+            dw_pcie_prog_outbound_atu(pci, atu_idx,
+                    PCIE_ATU_TYPE_MEM, region->cpu_addr,
+                    region->phy_addr, region->size);
+        }
+
+        if (port->io_size)
+        {
+            if (pci->num_viewport > ++atu_idx)
+            {
+                dw_pcie_prog_outbound_atu(pci, atu_idx,
+                        PCIE_ATU_TYPE_IO, port->io_addr,
+                        port->io_bus_addr, port->io_size);
+            }
+            else
+            {
+                /* Not enough viewports: I/O must share viewport 0 with config */
+                pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
+            }
+        }
+
+        if (pci->num_viewport <= atu_idx)
+        {
+            LOG_W("Resources exceed number of ATU entries (%d)", pci->num_viewport);
+        }
+    }
+
+    dw_pcie_writel_dbi(pci, PCIR_BAR(0), 0);
+
+    /* Program correct class for RC */
+    dw_pcie_writew_dbi(pci, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
+
+    /* Request a link-speed change after the link comes up */
+    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+    val |= PORT_LOGIC_SPEED_CHANGE;
+    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
+}
diff --git a/components/drivers/pci/host/dw/pcie-dw_platfrom.c b/components/drivers/pci/host/dw/pcie-dw_platfrom.c
new file mode 100644
index 00000000000..878ac41658d
--- /dev/null
+++ b/components/drivers/pci/host/dw/pcie-dw_platfrom.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-23     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#define DBG_TAG "pcie.dw.platfrom"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "pcie-dw.h"
+
+/* Per-compatible match data: selects root-complex or endpoint mode */
+struct dw_dw_platform_pcie_soc_data
+{
+    enum dw_pcie_device_mode mode;
+};
+
+/* Driver-private state hung off the platform device's user_data */
+struct dw_platform_pcie
+{
+    struct dw_pcie *pci;
+    /* NOTE(review): regmap is never referenced in this file — confirm it is needed */
+    struct rt_syscon *regmap;
+    const struct dw_dw_platform_pcie_soc_data *soc_data;
+};
+
+/* host_init hook: program the RC, wait for the link, enable the MSI doorbell. */
+static rt_err_t dw_platform_pcie_host_init(struct dw_pcie_port *port)
+{
+    struct dw_pcie *pci = to_dw_pcie_from_port(port);
+
+    dw_pcie_setup_rc(port);
+    /* Link may legitimately stay down (empty slot); continue either way */
+    dw_pcie_wait_for_link(pci);
+    dw_pcie_msi_init(port);
+
+    return RT_EOK;
+}
+
+/* The generic platform driver exposes the maximum number of MSI vectors. */
+static void dw_platform_set_irq_count(struct dw_pcie_port *port)
+{
+    port->irq_count = MAX_MSI_IRQS;
+}
+
+/* Host callbacks consumed by dw_pcie_host_init() */
+static const struct dw_pcie_host_ops dw_platform_pcie_host_ops =
+{
+    .host_init = dw_platform_pcie_host_init,
+    .set_irq_count = dw_platform_set_irq_count,
+};
+
+/* start_link hook: nothing extra needed for the generic platform core. */
+static rt_err_t dw_platform_pcie_establish_link(struct dw_pcie *pci)
+{
+    return RT_EOK;
+}
+
+/* Core callbacks shared by RC and EP modes */
+static const struct dw_pcie_ops dw_platform_pcie_ops =
+{
+    .start_link = dw_platform_pcie_establish_link,
+};
+
+/* ep_init hook: start from a clean slate by disabling every standard BAR. */
+static rt_err_t dw_platform_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+    struct dw_pcie *pci = to_dw_pcie_from_endpoint(ep);
+
+    for (int bar_idx = 0; bar_idx < PCI_STD_NUM_BARS; ++bar_idx)
+    {
+        dw_pcie_ep_reset_bar(pci, bar_idx);
+    }
+
+    return RT_EOK;
+}
+
+/**
+ * Raise an interrupt from the endpoint towards the host.
+ *
+ * @param ep the DesignWare endpoint
+ * @param func_no physical function raising the interrupt
+ * @param type legacy INTx, MSI or MSI-X
+ * @param irq vector number passed through to the MSI/MSI-X helpers
+ *
+ * @return RT_EOK on success, negative error code otherwise
+ */
+static rt_err_t dw_platform_pcie_ep_raise_irq(struct dw_pcie_ep *ep,
+        rt_uint8_t func_no, enum rt_pci_ep_irq type, unsigned irq)
+{
+    switch (type)
+    {
+    case RT_PCI_EP_IRQ_LEGACY:
+        return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+
+    case RT_PCI_EP_IRQ_MSI:
+        return dw_pcie_ep_raise_msi_irq(ep, func_no, irq);
+
+    case RT_PCI_EP_IRQ_MSIX:
+        return dw_pcie_ep_raise_msix_irq(ep, func_no, irq);
+
+    default:
+        LOG_E("Unknown IRQ type = %d", type);
+        /* Fix: an unsupported IRQ type is an error, not success */
+        return -RT_EINVAL;
+    }
+}
+
+/* Endpoint callbacks consumed by dw_pcie_ep_init() */
+static const struct dw_pcie_ep_ops dw_platform_pcie_ep_ops =
+{
+    .ep_init = dw_platform_pcie_ep_init,
+    .raise_irq = dw_platform_pcie_ep_raise_irq,
+};
+
+/* Wire up interrupts and ops for RC mode, then run the common host init. */
+static rt_err_t dw_platform_add_pcie_port(struct dw_platform_pcie *plat_pcie,
+        struct rt_device *dev)
+{
+    rt_err_t err;
+    struct dw_pcie *pci = plat_pcie->pci;
+    struct dw_pcie_port *port = &pci->port;
+
+    /* IRQ index 1: system interrupt; index 0: MSI interrupt */
+    port->sys_irq = rt_dm_dev_get_irq(dev, 1);
+
+    if (port->sys_irq < 0)
+    {
+        return port->sys_irq;
+    }
+
+#ifdef RT_PCI_MSI
+    port->msi_irq = rt_dm_dev_get_irq(dev, 0);
+
+    if (port->msi_irq < 0)
+    {
+        return port->msi_irq;
+    }
+#endif
+
+    port->ops = &dw_platform_pcie_host_ops;
+
+    err = dw_pcie_host_init(port);
+
+    if (err)
+    {
+        LOG_E("Failed to initialize host");
+    }
+
+    return err;
+}
+
+/**
+ * Wire up the endpoint resources ("dbi2" shadow registers and the outbound
+ * "addr_space" window) and run the common endpoint init.
+ *
+ * Fix vs. the original: the "dbi2" mapping was leaked when
+ * dw_pcie_ep_init() failed; all error paths now unmap it.
+ */
+static rt_err_t dw_platform_add_pcie_ep(struct dw_platform_pcie *plat_pcie,
+        struct rt_device *dev)
+{
+    rt_err_t err;
+    struct dw_pcie *pci = plat_pcie->pci;
+    struct dw_pcie_ep *ep = &pci->endpoint;
+
+    pci->dbi_base2 = rt_dm_dev_iomap_by_name(dev, "dbi2");
+
+    if (!pci->dbi_base2)
+    {
+        return -RT_EIO;
+    }
+
+    err = rt_dm_dev_get_address_by_name(dev, "addr_space", &ep->aspace, &ep->aspace_size);
+
+    if (err)
+    {
+        goto _unmap_dbi2;
+    }
+
+    ep->ops = &dw_platform_pcie_ep_ops;
+
+    if ((err = dw_pcie_ep_init(ep)))
+    {
+        LOG_E("Failed to initialize endpoint");
+        goto _unmap_dbi2;
+    }
+
+    return RT_EOK;
+
+_unmap_dbi2:
+    rt_iounmap(pci->dbi_base2);
+    pci->dbi_base2 = RT_NULL;
+
+    return err;
+}
+
+/*
+ * Probe a generic DesignWare PCIe platform device: allocate the driver
+ * state, map the "dbi" register space, then dispatch to RC or EP bring-up
+ * based on the matched compatible's soc_data. On any failure, everything
+ * allocated/mapped here is released.
+ */
+static rt_err_t dw_platform_pcie_probe(struct rt_platform_device *pdev)
+{
+    rt_err_t err;
+    struct dw_pcie *pci = RT_NULL;
+    struct dw_platform_pcie *plat_pcie;
+    struct rt_device *dev = &pdev->parent;
+
+    if (!(plat_pcie = rt_calloc(1, sizeof(*plat_pcie))))
+    {
+        return -RT_ENOMEM;
+    }
+
+    if (!(pci = rt_calloc(1, sizeof(*pci))))
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    plat_pcie->pci = pci;
+    /* assumes pdev->id is non-NULL — matched via dw_platform_pcie_ofw_ids */
+    plat_pcie->soc_data = pdev->id->data;
+
+    pci->dev = dev;
+    pci->ops = &dw_platform_pcie_ops;
+    pci->dbi_base = rt_dm_dev_iomap_by_name(dev, "dbi");
+
+    if (!pci->dbi_base)
+    {
+        err = -RT_EIO;
+        goto _fail;
+    }
+
+    /* Used by dw_platform_pcie_remove() to locate the driver state */
+    dev->user_data = plat_pcie;
+
+    switch (plat_pcie->soc_data->mode)
+    {
+    case DW_PCIE_RC_TYPE:
+        if (!RT_KEY_ENABLED(RT_PCI_DW_HOST))
+        {
+            err = -RT_ENOSYS;
+            goto _fail;
+        }
+
+        if ((err = dw_platform_add_pcie_port(plat_pcie, dev)))
+        {
+            goto _fail;
+        }
+        break;
+
+    case DW_PCIE_EP_TYPE:
+        if (!RT_KEY_ENABLED(RT_PCI_DW_EP))
+        {
+            err = -RT_ENOSYS;
+            goto _fail;
+        }
+
+        if ((err = dw_platform_add_pcie_ep(plat_pcie, dev)))
+        {
+            goto _fail;
+        }
+        break;
+
+    default:
+        LOG_E("Invalid device type %d", plat_pcie->soc_data->mode);
+        err = -RT_EINVAL;
+        goto _fail;
+    }
+
+    return RT_EOK;
+
+_fail:
+    if (pci)
+    {
+        if (pci->dbi_base)
+        {
+            rt_iounmap(pci->dbi_base);
+        }
+
+        rt_free(pci);
+    }
+
+    rt_free(plat_pcie);
+
+    return err;
+}
+
+/**
+ * Remove a DesignWare PCIe platform device and release its resources.
+ *
+ * Fix vs. the original: host-bridge teardown only applies in RC mode.
+ * In EP mode port.bridge is never created and port.ops stays NULL, so the
+ * old unconditional dw_pcie_host_free() dereferenced a NULL ops pointer.
+ */
+static rt_err_t dw_platform_pcie_remove(struct rt_platform_device *pdev)
+{
+    struct dw_platform_pcie *plat_pcie = pdev->parent.user_data;
+
+    if (plat_pcie->soc_data->mode == DW_PCIE_RC_TYPE)
+    {
+        rt_pci_host_bridge_remove(plat_pcie->pci->port.bridge);
+        dw_pcie_host_free(&plat_pcie->pci->port);
+    }
+
+    rt_iounmap(plat_pcie->pci->dbi_base);
+    rt_free(plat_pcie->pci);
+
+    rt_free(plat_pcie);
+
+    return RT_EOK;
+}
+
+/* Match data: controller role selected by the matched compatible string */
+static const struct dw_dw_platform_pcie_soc_data dw_platform_pcie_rc_soc_data =
+{
+    .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_dw_platform_pcie_soc_data dw_platform_pcie_ep_soc_data =
+{
+    .mode = DW_PCIE_EP_TYPE,
+};
+
+/* Generic DT bindings for DWC PCIe in root-complex and endpoint mode */
+static const struct rt_ofw_node_id dw_platform_pcie_ofw_ids[] =
+{
+    { .compatible = "snps,dw-pcie", .data = &dw_platform_pcie_rc_soc_data },
+    { .compatible = "snps,dw-pcie-ep", .data = &dw_platform_pcie_ep_soc_data },
+    { /* sentinel */ }
+};
+
+/* Platform driver registration for the generic DesignWare PCIe controller */
+static struct rt_platform_driver dw_platform_pcie_driver =
+{
+    .name = "dw-pcie",
+    .ids = dw_platform_pcie_ofw_ids,
+
+    .probe = dw_platform_pcie_probe,
+    .remove = dw_platform_pcie_remove,
+};
+RT_PLATFORM_DRIVER_EXPORT(dw_platform_pcie_driver);