path: root/package/mac80211/patches/310-pending_work.patch
blob: 57dcecaf100c8030f94546028bc98a8a08d4577d
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -189,7 +189,6 @@ struct ath_txq {
 	u32 axq_ampdu_depth;
 	bool stopped;
 	bool axq_tx_inprogress;
-	bool txq_flush_inprogress;
 	struct list_head axq_acq;
 	struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
 	struct list_head txq_fifo_pending;
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -373,6 +373,7 @@ void ath_beacon_tasklet(unsigned long da
 			ath_dbg(common, ATH_DBG_BSTUCK,
 				"missed %u consecutive beacons\n",
 				sc->beacon.bmisscnt);
+			ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
 			ath9k_hw_bstuck_nfcal(ah);
 		} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
 			ath_dbg(common, ATH_DBG_BSTUCK,
@@ -450,16 +451,6 @@ void ath_beacon_tasklet(unsigned long da
 		sc->beacon.updateslot = OK;
 	}
 	if (bfaddr != 0) {
-		/*
-		 * Stop any current dma and put the new frame(s) on the queue.
-		 * This should never fail since we check above that no frames
-		 * are still pending on the queue.
-		 */
-		if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
-			ath_err(common, "beacon queue %u did not stop?\n",
-				sc->beacon.beaconq);
-		}
-
 		/* NB: cabq traffic should already be queued and primed */
 		ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
 		ath9k_hw_txstart(ah, sc->beacon.beaconq);
@@ -780,7 +771,7 @@ void ath9k_set_beaconing_status(struct a
 		ah->imask &= ~ATH9K_INT_SWBA;
 		ath9k_hw_set_interrupts(ah, ah->imask);
 		tasklet_kill(&sc->bcon_tasklet);
-		ath9k_hw_stoptxdma(ah, sc->beacon.beaconq);
+		ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
 	}
 	ath9k_ps_restore(sc);
 }
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -95,9 +95,9 @@
 #define REG_READ_FIELD(_a, _r, _f) \
 	(((REG_READ(_a, _r) & _f) >> _f##_S))
 #define REG_SET_BIT(_a, _r, _f) \
-	REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
+	REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
 #define REG_CLR_BIT(_a, _r, _f) \
-	REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
+	REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
 
 #define DO_DELAY(x) do {			\
 		if ((++(x) % 64) == 0)          \
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -143,84 +143,59 @@ bool ath9k_hw_updatetxtriglevel(struct a
 }
 EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
 
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
 {
-#define ATH9K_TX_STOP_DMA_TIMEOUT	4000    /* usec */
-#define ATH9K_TIME_QUANTUM		100     /* usec */
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath9k_hw_capabilities *pCap = &ah->caps;
-	struct ath9k_tx_queue_info *qi;
-	u32 tsfLow, j, wait;
-	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+	int i, q;
 
-	if (q >= pCap->total_queues) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Stopping TX DMA, invalid queue: %u\n", q);
-		return false;
-	}
+	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
 
-	qi = &ah->txq[q];
-	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Stopping TX DMA, inactive queue: %u\n", q);
-		return false;
-	}
+	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
 
-	REG_WRITE(ah, AR_Q_TXD, 1 << q);
+	for (q = 0; q < AR_NUM_QCU; q++) {
+		for (i = 0; i < 1000; i++) {
+			if (i)
+				udelay(5);
 
-	for (wait = wait_time; wait != 0; wait--) {
-		if (ath9k_hw_numtxpending(ah, q) == 0)
-			break;
-		udelay(ATH9K_TIME_QUANTUM);
+			if (!ath9k_hw_numtxpending(ah, q))
+				break;
+		}
 	}
 
-	if (ath9k_hw_numtxpending(ah, q)) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"%s: Num of pending TX Frames %d on Q %d\n",
-			__func__, ath9k_hw_numtxpending(ah, q), q);
-
-		for (j = 0; j < 2; j++) {
-			tsfLow = REG_READ(ah, AR_TSF_L32);
-			REG_WRITE(ah, AR_QUIET2,
-				  SM(10, AR_QUIET2_QUIET_DUR));
-			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
-			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
-			REG_SET_BIT(ah, AR_TIMER_MODE,
-				       AR_QUIET_TIMER_EN);
-
-			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
-				break;
+	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
 
-			ath_dbg(common, ATH_DBG_QUEUE,
-				"TSF has moved while trying to set quiet time TSF: 0x%08x\n",
-				tsfLow);
-		}
+	REG_WRITE(ah, AR_Q_TXD, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
 
-		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
+{
+#define ATH9K_TX_STOP_DMA_TIMEOUT	1000    /* usec */
+#define ATH9K_TIME_QUANTUM		100     /* usec */
+	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+	int wait;
 
-		udelay(200);
-		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
+	REG_WRITE(ah, AR_Q_TXD, 1 << q);
 
-		wait = wait_time;
-		while (ath9k_hw_numtxpending(ah, q)) {
-			if ((--wait) == 0) {
-				ath_err(common,
-					"Failed to stop TX DMA in 100 msec after killing last frame\n");
-				break;
-			}
+	for (wait = wait_time; wait != 0; wait--) {
+		if (wait != wait_time)
 			udelay(ATH9K_TIME_QUANTUM);
-		}
 
-		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+		if (ath9k_hw_numtxpending(ah, q) == 0)
+			break;
 	}
 
 	REG_WRITE(ah, AR_Q_TXD, 0);
+
 	return wait != 0;
 
 #undef ATH9K_TX_STOP_DMA_TIMEOUT
 #undef ATH9K_TIME_QUANTUM
 }
-EXPORT_SYMBOL(ath9k_hw_stoptxdma);
+EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
 
 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
 {
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -676,7 +676,8 @@ void ath9k_hw_txstart(struct ath_hw *ah,
 void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
 			    const struct ath9k_tx_queue_info *qinfo);
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2128,56 +2128,42 @@ static void ath9k_set_coverage_class(str
 
 static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
-#define ATH_FLUSH_TIMEOUT	60 /* ms */
 	struct ath_softc *sc = hw->priv;
-	struct ath_txq *txq = NULL;
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	int i, j, npend = 0;
+	int timeout = 200; /* ms */
+	int i, j;
 
+	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
 
 	cancel_delayed_work_sync(&sc->tx_complete_work);
 
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (!ATH_TXQ_SETUP(sc, i))
-			continue;
-		txq = &sc->tx.txq[i];
-
-		if (!drop) {
-			for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
-				if (!ath9k_has_pending_frames(sc, txq))
-					break;
-				usleep_range(1000, 2000);
-			}
-		}
+	if (drop)
+		timeout = 1;
+
+	for (j = 0; j < timeout; j++) {
+		int npend = 0;
+
+		if (j)
+			usleep_range(1000, 2000);
 
-		if (drop || ath9k_has_pending_frames(sc, txq)) {
-			ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
-				txq->axq_qnum);
-			spin_lock_bh(&txq->axq_lock);
-			txq->txq_flush_inprogress = true;
-			spin_unlock_bh(&txq->axq_lock);
-
-			ath9k_ps_wakeup(sc);
-			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-			npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
-			ath9k_ps_restore(sc);
-			if (npend)
-				break;
+		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+			if (!ATH_TXQ_SETUP(sc, i))
+				continue;
 
-			ath_draintxq(sc, txq, false);
-			txq->txq_flush_inprogress = false;
+			npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
 		}
+
+		if (!npend)
+		    goto out;
 	}
 
-	if (npend) {
+	if (!ath_drain_all_txq(sc, false))
 		ath_reset(sc, false);
-		txq->txq_flush_inprogress = false;
-	}
 
+out:
 	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
 	mutex_unlock(&sc->mutex);
+	ath9k_ps_restore(sc);
 }
 
 struct ieee80211_ops ath9k_ops = {
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -166,7 +166,7 @@ static void ath_tx_flush_tid(struct ath_
 		fi = get_frame_info(bf->bf_mpdu);
 		if (fi->retries) {
 			ath_tx_update_baw(sc, tid, fi->seqno);
-			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
 		} else {
 			ath_tx_send_normal(sc, txq, NULL, &bf_head);
 		}
@@ -1194,16 +1194,14 @@ bool ath_drain_all_txq(struct ath_softc 
 	if (sc->sc_flags & SC_OP_INVALID)
 		return true;
 
-	/* Stop beacon queue */
-	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+	ath9k_hw_abort_tx_dma(ah);
 
-	/* Stop data queues */
+	/* Check if any queue remains active */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (ATH_TXQ_SETUP(sc, i)) {
-			txq = &sc->tx.txq[i];
-			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
-		}
+		if (!ATH_TXQ_SETUP(sc, i))
+			continue;
+
+		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
 	}
 
 	if (npend)
@@ -2014,8 +2012,7 @@ static void ath_tx_processq(struct ath_s
 		spin_lock_bh(&txq->axq_lock);
 		if (list_empty(&txq->axq_q)) {
 			txq->axq_link = NULL;
-			if (sc->sc_flags & SC_OP_TXAGGR &&
-			    !txq->txq_flush_inprogress)
+			if (sc->sc_flags & SC_OP_TXAGGR)
 				ath_txq_schedule(sc, txq);
 			spin_unlock_bh(&txq->axq_lock);
 			break;
@@ -2096,7 +2093,7 @@ static void ath_tx_processq(struct ath_s
 
 		spin_lock_bh(&txq->axq_lock);
 
-		if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
+		if (sc->sc_flags & SC_OP_TXAGGR)
 			ath_txq_schedule(sc, txq);
 		spin_unlock_bh(&txq->axq_lock);
 	}
@@ -2267,18 +2264,17 @@ void ath_tx_edma_tasklet(struct ath_soft
 
 		spin_lock_bh(&txq->axq_lock);
 
-		if (!txq->txq_flush_inprogress) {
-			if (!list_empty(&txq->txq_fifo_pending)) {
-				INIT_LIST_HEAD(&bf_head);
-				bf = list_first_entry(&txq->txq_fifo_pending,
-						      struct ath_buf, list);
-				list_cut_position(&bf_head,
-						  &txq->txq_fifo_pending,
-						  &bf->bf_lastbf->list);
-				ath_tx_txqaddbuf(sc, txq, &bf_head);
-			} else if (sc->sc_flags & SC_OP_TXAGGR)
-				ath_txq_schedule(sc, txq);
-		}
+		if (!list_empty(&txq->txq_fifo_pending)) {
+			INIT_LIST_HEAD(&bf_head);
+			bf = list_first_entry(&txq->txq_fifo_pending,
+					      struct ath_buf, list);
+			list_cut_position(&bf_head,
+					  &txq->txq_fifo_pending,
+					  &bf->bf_lastbf->list);
+			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		} else if (sc->sc_flags & SC_OP_TXAGGR)
+			ath_txq_schedule(sc, txq);
+
 		spin_unlock_bh(&txq->axq_lock);
 	}
 }
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -77,6 +77,9 @@ bool ieee80211_set_channel_type(struct i
 		switch (tmp->vif.bss_conf.channel_type) {
 		case NL80211_CHAN_NO_HT:
 		case NL80211_CHAN_HT20:
+			if (superchan > tmp->vif.bss_conf.channel_type)
+				break;
+
 			superchan = tmp->vif.bss_conf.channel_type;
 			break;
 		case NL80211_CHAN_HT40PLUS: