#ifndef __XEN_PCI_H__
#define __XEN_PCI_H__

#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>

#define MAX_MSIX_TABLE_ENTRIES  2048
#define MAX_MSIX_TABLE_PAGES    8
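
/*
 * Per-device discovery information.  is_extfn marks an extended
 * function and is_virtfn an SR-IOV virtual function; for a virtual
 * function, physfn records the bus/devfn of its parent physical
 * function.
 */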
struct pci_dev_info {
    unsigned is_extfn;
    unsigned is_virtfn;
    struct {
        u8 bus;
        u8 devfn;
    } physfn;
};

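/*
 * Per-device state: membership of the global and per-domain device
 * lists, the list of MSI descriptors, MSI-X table/PBA page tracking,
 * and the domain the device is currently assigned to.
 */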
struct pci_dev {
    struct list_head alldevs_list;
    struct list_head domain_list;

    struct list_head msi_list;
    unsigned int msix_nr_entries, msix_used_entries;
    struct {
        unsigned long first, last;
    } msix_table, msix_pba;
    int msix_table_refcnt[MAX_MSIX_TABLE_PAGES];
    int msix_table_idx[MAX_MSIX_TABLE_PAGES];
    spinlock_t msix_table_lock;

    struct domain *domain;
    const u8 bus;
    const u8 devfn;
    struct pci_dev_info info;
};

#define for_each_pdev(domain, pdev) \
    list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
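
/*
 * Illustrative sketch (not part of the original header): walking the
 * devices assigned to a given struct domain *d with for_each_pdev().
 * The caller is assumed to hold pcidevs_lock for the duration of the
 * walk, and the usual PCI_SLOT()/PCI_FUNC() helpers are assumed to be
 * available:
 *
 *     struct pci_dev *pdev;
 *
 *     spin_lock(&pcidevs_lock);
 *     for_each_pdev ( d, pdev )
 *         printk("%02x:%02x.%u assigned to this domain\n",
 *                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 *     spin_unlock(&pcidevs_lock);
 */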

/*
 * The pcidevs_lock protects alldevs_list and the assignment of
 * devices to domains.  It also synchronizes access to the parts of
 * the MSI capability that are not interrupt-handling related
 * (the mask bit register).
 */

extern spinlock_t pcidevs_lock;

enum {
    DEV_TYPE_PCIe_ENDPOINT,
    DEV_TYPE_PCIe_BRIDGE,       // PCIe root port, switch
    DEV_TYPE_PCIe2PCI_BRIDGE,   // PCIe-to-PCI/PCIx bridge
    DEV_TYPE_LEGACY_PCI_BRIDGE, // Legacy PCI bridge
    DEV_TYPE_PCI,
};

int pci_device_detect(u8 bus, u8 dev, u8 func);
int scan_pci_devices(void);
int pdev_type(u8 bus, u8 devfn);
int find_upstream_bridge(u8 *bus, u8 *devfn, u8 *secbus);
struct pci_dev *alloc_pdev(u8 bus, u8 devfn);
void free_pdev(struct pci_dev *pdev);
struct pci_dev *pci_lock_pdev(int bus, int devfn);
struct pci_dev *pci_lock_domain_pdev(struct domain *d, int bus, int devfn);

void pci_release_devices(struct domain *d);
int pci_add_device(u8 bus, u8 devfn);
int pci_remove_device(u8 bus, u8 devfn);
int pci_add_device_ext(u8 bus, u8 devfn, struct pci_dev_info *info);
struct pci_dev *pci_get_pdev(int bus, int devfn);
struct pci_dev *pci_get_pdev_by_domain(struct domain *d, int bus, int devfn);
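
/*
 * Illustrative sketch (not part of the original header): looking up a
 * device previously registered with pci_add_device().  pcidevs_lock is
 * assumed to be held across the lookup and any use of the returned
 * pointer:
 *
 *     struct pci_dev *pdev;
 *
 *     spin_lock(&pcidevs_lock);
 *     pdev = pci_get_pdev(bus, devfn);
 *     if ( pdev != NULL )
 *         ... pdev->domain identifies the current owner ...
 *     spin_unlock(&pcidevs_lock);
 */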

uint8_t pci_conf_read8(
    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg);
uint16_t pci_conf_read16(
    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg);
uint32_t pci_conf_read32(
    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg);
void pci_conf_write8(
    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg,
    uint8_t data);
void pci_conf_write16(
    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg,
    uint16_t data);
void pci_conf_write32(
    unsigned int bus, unsigned int dev, unsigned int func, unsigned int reg,
    uint32_t data);
uint32_t pci_conf_read(uint32_t cf8, uint8_t offset, uint8_t bytes);
void pci_conf_write(uint32_t cf8, uint8_t offset, uint8_t bytes, uint32_t data);
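
/*
 * Illustrative sketch (not part of the original header): reading the
 * vendor and device IDs through the typed config-space accessors, for
 * a given bus/dev/func.  Offsets 0x00 and 0x02 are the standard PCI
 * vendor and device ID registers:
 *
 *     uint16_t vendor = pci_conf_read16(bus, dev, func, 0x00);
 *     uint16_t device = pci_conf_read16(bus, dev, func, 0x02);
 *
 *     if ( vendor != 0xffff )
 *         printk("%02x:%02x.%u is %04x:%04x\n",
 *                bus, dev, func, vendor, device);
 */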
int pci_mmcfg_read(unsigned int seg, unsigned int bus,
                   unsigned int devfn, int reg, int len, u32 *value);
int pci_mmcfg_write(unsigned int seg, unsigned int bus,
                    unsigned int devfn, int reg, int len, u32 value);
int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap);
int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap);
int pci_find_ext_capability(int seg, int bus, int devfn, int cap);
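
/*
 * Illustrative sketch (not part of the original header): locating the
 * MSI capability of a function.  0x05 is the standard PCI capability
 * ID for MSI; a return value of 0 means the capability is absent:
 *
 *     int pos = pci_find_cap_offset(bus, dev, func, 0x05);
 *
 *     if ( pos )
 *         ... MSI capability registers start at config offset pos ...
 */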

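/*
 * MSI-X table pass-through registration: gtable is the guest-physical
 * address of the MSI-X table backing the given pirq, and unregister
 * undoes the mapping.  pci_enable_acs() enables PCIe Access Control
 * Services on the device where supported.
 */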
int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable);
void msixtbl_pt_unregister(struct domain *d, int pirq);
void pci_enable_acs(struct pci_dev *pdev);

#endif /* __XEN_PCI_H__ */