aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/layerscape/patches-5.4/814-qe-0003-irqchip-qeic-merge-qeic_of_init-into-qe_ic_init.patch
blob: 877cd94e0d895a45336180af339ad0e0ce4b5a6c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
From 25694a71e1958a4d72891b99b6a2805b7a7bffff Mon Sep 17 00:00:00 2001
From: Zhao Qiang <qiang.zhao@nxp.com>
Date: Tue, 14 Mar 2017 10:40:38 +0800
Subject: [PATCH] irqchip/qeic: merge qeic_of_init into qe_ic_init

qeic_of_init just gets the device_node of the qeic from the dtb and calls
qe_ic_init, passing the device_node to it.
So merge qeic_of_init into qe_ic_init, so that the qeic node is obtained
inside qe_ic_init.

Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
---
 drivers/irqchip/irq-qeic.c | 89 ++++++++++++++++++++--------------------------
 include/soc/fsl/qe/qe_ic.h |  7 ----
 2 files changed, 39 insertions(+), 57 deletions(-)

--- a/drivers/irqchip/irq-qeic.c
+++ b/drivers/irqchip/irq-qeic.c
@@ -402,27 +402,33 @@ unsigned int qe_ic_get_high_irq(struct q
 	return irq_linear_revmap(qe_ic->irqhost, irq);
 }
 
-void __init qe_ic_init(struct device_node *node, unsigned int flags,
-		       void (*low_handler)(struct irq_desc *desc),
-		       void (*high_handler)(struct irq_desc *desc))
+static int __init qe_ic_init(struct device_node *node, unsigned int flags)
 {
 	struct qe_ic *qe_ic;
 	struct resource res;
-	u32 temp = 0, ret, high_active = 0;
+	u32 temp = 0, high_active = 0;
+	int ret = 0;
+
+	if (!node)
+		return -ENODEV;
 
 	ret = of_address_to_resource(node, 0, &res);
-	if (ret)
-		return;
+	if (ret) {
+		ret = -ENODEV;
+		goto err_put_node;
+	}
 
 	qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
-	if (qe_ic == NULL)
-		return;
+	if (qe_ic == NULL) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
 
 	qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
 					       &qe_ic_host_ops, qe_ic);
 	if (qe_ic->irqhost == NULL) {
-		kfree(qe_ic);
-		return;
+		ret = -ENOMEM;
+		goto err_free_qe_ic;
 	}
 
 	qe_ic->regs = ioremap(res.start, resource_size(&res));
@@ -433,9 +439,9 @@ void __init qe_ic_init(struct device_nod
 	qe_ic->virq_low = irq_of_parse_and_map(node, 1);
 
 	if (qe_ic->virq_low == NO_IRQ) {
-		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
-		kfree(qe_ic);
-		return;
+		pr_err("Failed to map QE_IC low IRQ\n");
+		ret = -ENOMEM;
+		goto err_domain_remove;
 	}
 
 	/* default priority scheme is grouped. If spread mode is    */
@@ -462,13 +468,24 @@ void __init qe_ic_init(struct device_nod
 	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
 
 	irq_set_handler_data(qe_ic->virq_low, qe_ic);
-	irq_set_chained_handler(qe_ic->virq_low, low_handler);
+	irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
 
 	if (qe_ic->virq_high != NO_IRQ &&
 			qe_ic->virq_high != qe_ic->virq_low) {
 		irq_set_handler_data(qe_ic->virq_high, qe_ic);
-		irq_set_chained_handler(qe_ic->virq_high, high_handler);
+		irq_set_chained_handler(qe_ic->virq_high,
+					qe_ic_cascade_high_mpic);
 	}
+	of_node_put(node);
+	return 0;
+
+err_domain_remove:
+	irq_domain_remove(qe_ic->irqhost);
+err_free_qe_ic:
+	kfree(qe_ic);
+err_put_node:
+	of_node_put(node);
+	return ret;
 }
 
 void qe_ic_set_highest_priority(unsigned int virq, int high)
@@ -565,44 +582,16 @@ int qe_ic_set_high_priority(unsigned int
 	return 0;
 }
 
-static struct bus_type qe_ic_subsys = {
-	.name = "qe_ic",
-	.dev_name = "qe_ic",
-};
-
-static struct device device_qe_ic = {
-	.id = 0,
-	.bus = &qe_ic_subsys,
-};
-
-static int __init init_qe_ic_sysfs(void)
+static int __init init_qe_ic(struct device_node *node,
+			     struct device_node *parent)
 {
-	int rc;
+	int ret;
 
-	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
+	ret = qe_ic_init(node, 0);
+	if (ret)
+		return ret;
 
-	rc = subsys_system_register(&qe_ic_subsys, NULL);
-	if (rc) {
-		printk(KERN_ERR "Failed registering qe_ic sys class\n");
-		return -ENODEV;
-	}
-	rc = device_register(&device_qe_ic);
-	if (rc) {
-		printk(KERN_ERR "Failed registering qe_ic sys device\n");
-		return -ENODEV;
-	}
 	return 0;
 }
 
-static int __init qeic_of_init(struct device_node *node,
-			       struct device_node *parent)
-{
-	if (!node)
-		return;
-	qe_ic_init(node, 0, qe_ic_cascade_low_mpic,
-		   qe_ic_cascade_high_mpic);
-	of_node_put(node);
-}
-
-IRQCHIP_DECLARE(qeic, "fsl,qe-ic", qeic_of_init);
-subsys_initcall(init_qe_ic_sysfs);
+IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
--- a/include/soc/fsl/qe/qe_ic.h
+++ b/include/soc/fsl/qe/qe_ic.h
@@ -54,16 +54,9 @@ enum qe_ic_grp_id {
 };
 
 #ifdef CONFIG_QUICC_ENGINE
-void qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(struct irq_desc *desc),
-		void (*high_handler)(struct irq_desc *desc));
 unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
 unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
 #else
-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(struct irq_desc *desc),
-		void (*high_handler)(struct irq_desc *desc))
-{}
 static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
 { return 0; }
 static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
.h> #include <asm/hvm/hvm.h> #include <asm/hvm/io.h> #include <asm/hvm/support.h> #include <asm/hvm/svm/svm.h> #include <asm/hvm/svm/intr.h> #include <xen/event.h> #include <xen/kernel.h> #include <xen/domain_page.h> #include <xen/keyhandler.h> extern int svm_dbg_on; #define GUEST_SEGMENT_LIMIT 0xffffffff #define IOPM_SIZE (12 * 1024) #define MSRPM_SIZE (8 * 1024) struct vmcb_struct *alloc_vmcb(void) { struct vmcb_struct *vmcb; vmcb = alloc_xenheap_page(); if ( vmcb == NULL ) { printk(XENLOG_WARNING "Warning: failed to allocate vmcb.\n"); return NULL; } memset(vmcb, 0, PAGE_SIZE); return vmcb; } void free_vmcb(struct vmcb_struct *vmcb) { free_xenheap_page(vmcb); } struct host_save_area *alloc_host_save_area(void) { struct host_save_area *hsa; hsa = alloc_xenheap_page(); if ( hsa == NULL ) { printk(XENLOG_WARNING "Warning: failed to allocate vmcb.\n"); return NULL; } memset(hsa, 0, PAGE_SIZE); return hsa; } void free_host_save_area(struct host_save_area *hsa) { free_xenheap_page(hsa); } static int construct_vmcb(struct vcpu *v) { struct arch_svm_struct *arch_svm = &v->arch.hvm_svm; struct vmcb_struct *vmcb = arch_svm->vmcb; svm_segment_attributes_t attrib; /* Always flush the TLB on VMRUN. All guests share a single ASID (1). */ vmcb->tlb_control = 1; vmcb->guest_asid = 1; /* SVM intercepts. */ vmcb->general1_intercepts = GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI | GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT | GENERAL1_INTERCEPT_CPUID | GENERAL1_INTERCEPT_INVD | GENERAL1_INTERCEPT_HLT | GENERAL1_INTERCEPT_INVLPG | GENERAL1_INTERCEPT_INVLPGA | GENERAL1_INTERCEPT_IOIO_PROT | GENERAL1_INTERCEPT_MSR_PROT | GENERAL1_INTERCEPT_SHUTDOWN_EVT; vmcb->general2_intercepts = GENERAL2_INTERCEPT_VMRUN | GENERAL2_INTERCEPT_VMMCALL | GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE | GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI | GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP; /* Intercept all debug-register writes. 
*/ vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES; /* Intercept all control-register accesses, except to CR2. */ vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ | CR_INTERCEPT_CR2_WRITE); /* I/O and MSR permission bitmaps. */ arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE)); if ( arch_svm->msrpm == NULL ) return -ENOMEM; memset(arch_svm->msrpm, 0xff, MSRPM_SIZE); vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm); vmcb->iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap); /* Virtualise EFLAGS.IF and LAPIC TPR (CR8). */ vmcb->vintr.fields.intr_masking = 1; /* Initialise event injection to no-op. */ vmcb->eventinj.bytes = 0; /* TSC. */ vmcb->tsc_offset = 0; /* Guest EFER: *must* contain SVME or VMRUN will fail. */ vmcb->efer = EFER_SVME; /* Guest segment limits. */ vmcb->cs.limit = GUEST_SEGMENT_LIMIT; vmcb->es.limit = GUEST_SEGMENT_LIMIT; vmcb->ss.limit = GUEST_SEGMENT_LIMIT; vmcb->ds.limit = GUEST_SEGMENT_LIMIT; vmcb->fs.limit = GUEST_SEGMENT_LIMIT; vmcb->gs.limit = GUEST_SEGMENT_LIMIT; /* Guest segment bases. */ vmcb->cs.base = 0; vmcb->es.base = 0; vmcb->ss.base = 0; vmcb->ds.base = 0; vmcb->fs.base = 0; vmcb->gs.base = 0; /* Guest segment AR bytes. */ attrib.bytes = 0; attrib.fields.type = 0x3; /* type = 3 */ attrib.fields.s = 1; /* code or data, i.e. not system */ attrib.fields.dpl = 0; /* DPL = 0 */ attrib.fields.p = 1; /* segment present */ attrib.fields.db = 1; /* 32-bit */ attrib.fields.g = 1; /* 4K pages in limit */ vmcb->es.attr = attrib; vmcb->ss.attr = attrib; vmcb->ds.attr = attrib; vmcb->fs.attr = attrib; vmcb->gs.attr = attrib; attrib.fields.type = 0xb; /* type=0xb -> executable/readable, accessed */ vmcb->cs.attr = attrib; /* Guest IDT. */ vmcb->idtr.base = 0; vmcb->idtr.limit = 0; /* Guest GDT. */ vmcb->gdtr.base = 0; vmcb->gdtr.limit = 0; /* Guest LDT. */ vmcb->ldtr.sel = 0; vmcb->ldtr.base = 0; vmcb->ldtr.limit = 0; vmcb->ldtr.attr.bytes = 0; /* Guest TSS. 
*/ attrib.fields.type = 0xb; /* 32-bit TSS (busy) */ vmcb->tr.attr = attrib; vmcb->tr.base = 0; vmcb->tr.limit = 0xff; /* Guest CR0. */ vmcb->cr0 = read_cr0(); arch_svm->cpu_shadow_cr0 = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS); vmcb->cr0 |= X86_CR0_WP; /* Guest CR4. */ arch_svm->cpu_shadow_cr4 = read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE); vmcb->cr4 = arch_svm->cpu_shadow_cr4 | SVM_CR4_HOST_MASK; paging_update_paging_modes(v); vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP; if ( paging_mode_hap(v->domain) ) { vmcb->cr0 = arch_svm->cpu_shadow_cr0; vmcb->np_enable = 1; /* enable nested paging */ vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */ vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG; } return 0; } int svm_create_vmcb(struct vcpu *v) { struct arch_svm_struct *arch_svm = &v->arch.hvm_svm; int rc; if ( (arch_svm->vmcb == NULL) && (arch_svm->vmcb = alloc_vmcb()) == NULL ) { printk("Failed to create a new VMCB\n"); return -ENOMEM; } if ( (rc = construct_vmcb(v)) != 0 ) { free_vmcb(arch_svm->vmcb); arch_svm->vmcb = NULL; return rc; } arch_svm->vmcb_pa = virt_to_maddr(arch_svm->vmcb); return 0; } void svm_destroy_vmcb(struct vcpu *v) { struct arch_svm_struct *arch_svm = &v->arch.hvm_svm; if ( arch_svm->vmcb != NULL ) free_vmcb(arch_svm->vmcb); if ( arch_svm->msrpm != NULL ) { free_xenheap_pages( arch_svm->msrpm, get_order_from_bytes(MSRPM_SIZE)); arch_svm->msrpm = NULL; } arch_svm->vmcb = NULL; } void svm_do_launch(struct vcpu *v) { hvm_stts(v); /* current core is the one we intend to perform the VMRUN on */ v->arch.hvm_svm.launch_core = smp_processor_id(); v->arch.schedule_tail = arch_svm_do_resume; } static void svm_dump_sel(char *name, svm_segment_register_t *s) { printk("%s: sel=0x%04x, attr=0x%04x, limit=0x%08x, base=0x%016llx\n", name, s->sel, s->attr.bytes, s->limit, (unsigned long long)s->base); } void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb) { printk("Dumping 
guest's current state at %s...\n", from); printk("Size of VMCB = %d, address = %p\n", (int) sizeof(struct vmcb_struct), vmcb); printk("cr_intercepts = 0x%08x dr_intercepts = 0x%08x " "exception_intercepts = 0x%08x\n", vmcb->cr_intercepts, vmcb->dr_intercepts, vmcb->exception_intercepts); printk("general1_intercepts = 0x%08x general2_intercepts = 0x%08x\n", vmcb->general1_intercepts, vmcb->general2_intercepts); printk("iopm_base_pa = %016llx msrpm_base_pa = 0x%016llx tsc_offset = " "0x%016llx\n", (unsigned long long) vmcb->iopm_base_pa, (unsigned long long) vmcb->msrpm_base_pa, (unsigned long long) vmcb->tsc_offset); printk("tlb_control = 0x%08x vintr = 0x%016llx interrupt_shadow = " "0x%016llx\n", vmcb->tlb_control, (unsigned long long) vmcb->vintr.bytes, (unsigned long long) vmcb->interrupt_shadow); printk("exitcode = 0x%016llx exitintinfo = 0x%016llx\n", (unsigned long long) vmcb->exitcode, (unsigned long long) vmcb->exitintinfo.bytes); printk("exitinfo1 = 0x%016llx exitinfo2 = 0x%016llx \n", (unsigned long long) vmcb->exitinfo1, (unsigned long long) vmcb->exitinfo2); printk("np_enable = 0x%016llx guest_asid = 0x%03x\n", (unsigned long long) vmcb->np_enable, vmcb->guest_asid); printk("cpl = %d efer = 0x%016llx star = 0x%016llx lstar = 0x%016llx\n", vmcb->cpl, (unsigned long long) vmcb->efer, (unsigned long long) vmcb->star, (unsigned long long) vmcb->lstar); printk("CR0 = 0x%016llx CR2 = 0x%016llx\n", (unsigned long long) vmcb->cr0, (unsigned long long) vmcb->cr2); printk("CR3 = 0x%016llx CR4 = 0x%016llx\n", (unsigned long long) vmcb->cr3, (unsigned long long) vmcb->cr4); printk("RSP = 0x%016llx RIP = 0x%016llx\n", (unsigned long long) vmcb->rsp, (unsigned long long) vmcb->rip); printk("RAX = 0x%016llx RFLAGS=0x%016llx\n", (unsigned long long) vmcb->rax, (unsigned long long) vmcb->rflags); printk("DR6 = 0x%016llx, DR7 = 0x%016llx\n", (unsigned long long) vmcb->dr6, (unsigned long long) vmcb->dr7); printk("CSTAR = 0x%016llx SFMask = 0x%016llx\n", (unsigned long 
long) vmcb->cstar, (unsigned long long) vmcb->sfmask); printk("KernGSBase = 0x%016llx PAT = 0x%016llx \n", (unsigned long long) vmcb->kerngsbase, (unsigned long long) vmcb->g_pat); printk("H_CR3 = 0x%016llx\n", (unsigned long long)vmcb->h_cr3); /* print out all the selectors */ svm_dump_sel("CS", &vmcb->cs); svm_dump_sel("DS", &vmcb->ds); svm_dump_sel("SS", &vmcb->ss); svm_dump_sel("ES", &vmcb->es); svm_dump_sel("FS", &vmcb->fs); svm_dump_sel("GS", &vmcb->gs); svm_dump_sel("GDTR", &vmcb->gdtr); svm_dump_sel("LDTR", &vmcb->ldtr); svm_dump_sel("IDTR", &vmcb->idtr); svm_dump_sel("TR", &vmcb->tr); } static void vmcb_dump(unsigned char ch) { struct domain *d; struct vcpu *v; printk("*********** VMCB Areas **************\n"); rcu_read_lock(&domlist_read_lock); for_each_domain ( d ) { if ( !is_hvm_domain(d) ) continue; printk("\n>>> Domain %d <<<\n", d->domain_id); for_each_vcpu ( d, v ) { printk("\tVCPU %d\n", v->vcpu_id); svm_dump_vmcb("key_handler", v->arch.hvm_svm.vmcb); } } rcu_read_unlock(&domlist_read_lock); printk("**************************************\n"); } void setup_vmcb_dump(void) { register_keyhandler('v', vmcb_dump, "dump AMD-V VMCBs"); } /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */