// dear imgui: Renderer for OpenGL3 / OpenGL ES2 / OpenGL ES3 (modern OpenGL with shaders / programmatic pipeline)
// This needs to be used along with a Platform Binding (e.g. GLFW, SDL, Win32, custom..)
// (Note: We are using GL3W as a helper library to access OpenGL functions since there is no standard header to access modern OpenGL functions easily. Alternatives are GLEW, Glad, etc..)

// Implemented features:
//  [X] Renderer: User texture binding. Use 'GLuint' OpenGL texture identifier as void*/ImTextureID. Read the FAQ about ImTextureID in imgui.cpp.

// You can copy and use unmodified imgui_impl_* files in your project. See main.cpp for an example of using this.
// If you are new to dear imgui, read examples/README.txt and read the documentation at the top of imgui.cpp.
// https://github.com/ocornut/imgui
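
// A minimal usage sketch (illustrative only; 'ImGui_ImplXXXX_*' stands for whichever platform binding you use, e.g. GLFW or SDL):
//   ImGui_ImplOpenGL3_Init(NULL);                                // NULL picks a default GLSL version string
//   while (app_is_running)                                       // your main loop
//   {
//       ImGui_ImplOpenGL3_NewFrame();                            // lazily creates device objects (font texture, shaders)
//       ImGui_ImplXXXX_NewFrame();                               // platform binding new-frame
//       ImGui::NewFrame();
//       // ... submit your ImGui widgets here ...
//       ImGui::Render();
//       ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());  // issues the GL draw calls
//   }
//   ImGui_ImplOpenGL3_Shutdown();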

// CHANGELOG
// (minor and older changes stripped away, please see git history for details)
//  2018-08-29: OpenGL: Added support for more OpenGL loaders: glew and glad, with comments indicative that any loader can be used.
//  2018-08-09: OpenGL: Default to OpenGL ES 3 on iOS and Android. GLSL version default to "#version 300 ES".
//  2018-07-30: OpenGL: Support for GLSL 300 ES and 410 core. Fixes for Emscripten compilation.
//  2018-07-10: OpenGL: Support for more GLSL versions (based on the GLSL version string). Added error output when shaders fail to compile/link.
//  2018-06-08: Misc: Extracted imgui_impl_opengl3.cpp/.h away from the old combined GLFW/SDL+OpenGL3 examples.
//  2018-06-08: OpenGL: Use draw_data->DisplayPos and draw_data->DisplaySize to setup projection matrix and clipping rectangle.
//  2018-05-25: OpenGL: Removed unnecessary backup/restore of GL_ELEMENT_ARRAY_BUFFER_BINDING since this is part of the VAO state.
//  2018-05-14: OpenGL: Making the call to glBindSampler() optional so 3.2 context won't fail if the function is a NULL pointer.
//  2018-03-06: OpenGL: Added const char* glsl_version parameter to ImGui_ImplOpenGL3_Init() so user can override the GLSL version e.g. "#version 150".
//  2018-02-23: OpenGL: Create the VAO in the render function so the setup can more easily be used with multiple shared GL context.
//  2018-02-16: Misc: Obsoleted the io.RenderDrawListsFn callback and exposed ImGui_ImplSdlGL3_RenderDrawData() in the .h file so you can call it yourself.
//  2018-01-07: OpenGL: Changed GLSL shader version from 330 to 150.
//  2017-09-01: OpenGL: Save and restore current bound sampler. Save and restore current polygon mode.
//  2017-05-01: OpenGL: Fixed save and restore of current blend func state.
//  2017-05-01: OpenGL: Fixed save and restore of current GL_ACTIVE_TEXTURE.
//  2016-09-05: OpenGL: Fixed save and restore of current scissor rectangle.
//  2016-07-29: OpenGL: Explicitly setting GL_UNPACK_ROW_LENGTH to reduce issues because SDL changes it. (#752)

//----------------------------------------
// OpenGL    GLSL      GLSL
// version   version   string
//----------------------------------------
//  2.0       110       "#version 110"
//  2.1       120
//  3.0       130
//  3.1       140
//  3.2       150       "#version 150"
//  3.3       330
//  4.0       400
//  4.1       410       "#version 410 core"
//  4.2       420
//  4.3       430
//  ES 2.0    100       "#version 100"
//  ES 3.0    300       "#version 300 es"
//----------------------------------------
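
// For example (a sketch; creating the GL context itself is up to your platform/windowing code):
//   ImGui_ImplOpenGL3_Init("#version 150");     // desktop GL 3.2+ core profile
//   ImGui_ImplOpenGL3_Init("#version 300 es");  // OpenGL ES 3.0
//   ImGui_ImplOpenGL3_Init(NULL);               // defaults to "#version 130" (or "#version 300 es" when USE_GL_ES3 is defined)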

#if defined(_MSC_VER) && !defined(_CRT_SECURE_NO_WARNINGS)
#define _CRT_SECURE_NO_WARNINGS
#endif

#include "imgui.h"
#include "imgui_impl_opengl3.h"
#include <stdio.h>
#if defined(_MSC_VER) && _MSC_VER <= 1500 // MSVC 2008 or earlier
#include <stddef.h>     // intptr_t
#else
#include <stdint.h>     // intptr_t
#endif
#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif

// iOS, Android and Emscripten can use GL ES 3
// Call ImGui_ImplOpenGL3_Init() with "#version 300 es"
#if (defined(__APPLE__) && TARGET_OS_IOS) || (defined(__ANDROID__)) || (defined(__EMSCRIPTEN__))
#define USE_GL_ES3
#endif

#ifdef USE_GL_ES3
// OpenGL ES 3
#include <GLES3/gl3.h>  // Use GL ES 3
#else
// Regular OpenGL
// About OpenGL function loaders: modern OpenGL doesn't have a standard header file and requires individual function pointers to be loaded manually. 
// Helper libraries are often used for this purpose! Here we are supporting a few common ones: gl3w, glew, glad.
// You may use another loader/header of your choice (glext, glLoadGen, etc.), or choose to manually implement your own.
#if defined(IMGUI_IMPL_OPENGL_LOADER_GL3W)
#include <GL/gl3w.h>
#elif defined(IMGUI_IMPL_OPENGL_LOADER_GLEW)
#include <GL/glew.h>
#elif defined(IMGUI_IMPL_OPENGL_LOADER_GLAD)
#include <glad/glad.h>
#else
#include IMGUI_IMPL_OPENGL_LOADER_CUSTOM
#endif
#endif

// OpenGL Data
static char         g_GlslVersionString[32] = "";
static GLuint       g_FontTexture = 0;
static GLuint       g_ShaderHandle = 0, g_VertHandle = 0, g_FragHandle = 0;
static int          g_AttribLocationTex = 0, g_AttribLocationProjMtx = 0;
static int          g_AttribLocationPosition = 0, g_AttribLocationUV = 0, g_AttribLocationColor = 0;
static unsigned int g_VboHandle = 0, g_ElementsHandle = 0;

// Functions
bool    ImGui_ImplOpenGL3_Init(const char* glsl_version)
{
    // Store GLSL version string so we can refer to it later in case we recreate shaders. Note: GLSL version is NOT the same as GL version. Leave this to NULL if unsure.
#ifdef USE_GL_ES3
    if (glsl_version == NULL)
        glsl_version = "#version 300 es";
#else
    if (glsl_version == NULL)
        glsl_version = "#version 130";
#endif
    IM_ASSERT((int)strlen(glsl_version) + 2 < IM_ARRAYSIZE(g_GlslVersionString));
    strcpy(g_GlslVersionString, glsl_version);
    strcat(g_GlslVersionString, "\n");
    return true;
}

void    ImGui_ImplOpenGL3_Shutdown()
{
    ImGui_ImplOpenGL3_DestroyDeviceObjects();
}

void    ImGui_ImplOpenGL3_NewFrame()
{
    if (!g_FontTexture)
        ImGui_ImplOpenGL3_CreateDeviceObjects();
}

// OpenGL3 Render function.
// (this used to be set in io.RenderDrawListsFn and called by ImGui::Render(), but you can now call this directly from your main loop)
// Note that this implementation is a little overcomplicated because we are saving/setting up/restoring every OpenGL state explicitly, in order to be able to run within any OpenGL engine that doesn't do so.
void    ImGui_ImplOpenGL3_RenderDrawData(ImDrawData* draw_data)
{
    // Avoid rendering when minimized, scale coordinates for retina displays (screen coordinates != framebuffer coordinates)
    ImGuiIO& io = ImGui::GetIO();
    int fb_width = (int)(draw_data->DisplaySize.x * io.DisplayFramebufferScale.x);
    int fb_height = (int)(draw_data->DisplaySize.y * io.DisplayFramebufferScale.y);
    if (fb_width <= 0 || fb_height <= 0)
        return;
    draw_data->ScaleClipRects(io.DisplayFramebufferScale);

    // Backup GL state
    GLenum last_active_texture; glGetIntegerv(GL_ACTIVE_TEXTURE, (GLint*)&last_active_texture);
    glActiveTexture(GL_TEXTURE0);
    GLint last_program; glGetIntegerv(GL_CURRENT_PROGRAM, &last_program);
    GLint last_texture; glGetIntegerv(GL_TEXTURE_BINDING_2D, &last_texture);
#ifdef GL_SAMPLER_BINDING
    GLint last_sampler; glGetIntegerv(GL_SAMPLER_BINDING, &last_sampler);
#endif
    GLint last_array_buffer; glGetIntegerv(GL_ARRAY_BUFFER_BINDING, &last_array_buffer);
    GLint last_vertex_array; glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &last_vertex_array);
#ifdef GL_POLYGON_MODE
    GLint last_polygon_mode[2]; glGetIntegerv(GL_POLYGON_MODE, last_polygon_mode);
#endif
    GLint last_viewport[4]; glGetIntegerv(GL_VIEWPORT, last_viewport);
    GLint last_scissor_box[4]; glGetIntegerv(GL_SCISSOR_BOX, last_scissor_box);
    GLenum last_blend_src_rgb; glGetIntegerv(GL_BLEND_SRC_RGB, (GLint*)&last_blend_src_rgb);
    GLenum last_blend_dst_rgb; glGetIntegerv(GL_BLEND_DST_RGB, (GLint*)&last_blend_dst_rgb);
    GLenum last_blend_src_alpha; glGetIntegerv(GL_BLEND_SRC_ALPHA, (GLint*)&last_blend_src_alpha);
    GLenum last_blend_dst_alpha; glGetIntegerv(GL_BLEND_DST_ALPHA, (GLint*)&last_blend_dst_alpha);
    GLenum last_blend_equation_rgb; glGetIntegerv(GL_BLEND_EQUATION_RGB, (GLint*)&last_blend_equation_rgb);
    GLenum last_blend_equation_alpha; glGetIntegerv(GL_BLEND_EQUATION_ALPHA, (GLint*)&last_blend_equation_alpha);
    GLboolean last_enable_blend = glIsEnabled(GL_BLEND);
    GLboolean last_enable_cull_face = glIsEnabled(GL_CULL_FACE);
    GLboolean last_enable_depth_test = glIsEnabled(GL_DEPTH_TEST);
    GLboolean last_enable_scissor_test = glIsEnabled(GL_SCISSOR_TEST);

    // Setup render state: alpha-blending enabled, no face culling, no depth testing, scissor enabled, polygon fill
    glEnable(GL_BLEND);
    glBlendEquation(GL_FUNC_ADD);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glDisable(GL_CULL_FACE);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_SCISSOR_TEST);
#ifdef GL_POLYGON_MODE
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
#endif

    // Setup viewport, orthographic projection matrix
    // Our visible imgui space lies from draw_data->DisplayPos (top left) to draw_data->DisplayPos + draw_data->DisplaySize (bottom right). DisplayMin is typically (0,0) for single viewport apps.
    glViewport(0, 0, (GLsizei)fb_width, (GLsizei)fb_height);
    float L = draw_data->DisplayPos.x;
    float R = draw_data->DisplayPos.x + draw_data->DisplaySize.x;
    float T = draw_data->DisplayPos.y;
    float B = draw_data->DisplayPos.y + draw_data->DisplaySize.y;
    const float ortho_projection[4][4] =
    {
        { 2.0f/(R-L),   0.0f,         0.0f,   0.0f },
        { 0.0f,         2.0f/(T-B),   0.0f,   0.0f },
        { 0.0f,         0.0f,        -1.0f,   0.0f },
        { (R+L)/(L-R),  (T+B)/(B-T),  0.0f,   1.0f },
    };
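    // (this matrix maps the visible imgui rect [L,R]x[T,B], top-left origin, onto [-1,+1] clip space, flipping Y for OpenGL)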
    glUseProgram(g_ShaderHandle);
    glUniform1i(g_AttribLocationTex, 0);
    glUniformMatrix4fv(g_AttribLocationProjMtx, 1, GL_FALSE, &ortho_projection[0][0]);
#ifdef GL_SAMPLER_BINDING
    glBindSampler(0, 0); // We use combined texture/sampler state. Applications using GL 3.3 may set that otherwise.
#endif
    // Recreate the VAO every time
    // (This is to easily allow multiple GL contexts. VAOs are not shared among GL contexts, and we don't track creation/deletion of windows so we don't have an obvious key to use to cache them.)
    GLuint vao_handle = 0;
    glGenVertexArrays(1, &vao_handle);
    glBindVertexArray(vao_handle);
    glBindBuffer(GL_ARRAY_BUFFER, g_VboHandle);
    glEnableVertexAttribArray(g_AttribLocationPosition);
    glEnableVertexAttribArray(g_AttribLocationUV);
    glEnableVertexAttribArray(g_AttribLocationColor);
    glVertexAttribPointer(g_AttribLocationPosition, 2, GL_FLOAT, GL_FALSE, sizeof(ImDrawVert), (GLvoid*)IM_OFFSETOF(ImDrawVert, pos));
    glVertexAttribPointer(g_AttribLocationUV, 2, GL_FLOAT, GL_FALSE, sizeof(ImDrawVert), (GLvoid*)IM_OFFSETOF(ImDrawVert, uv));
    glVertexAttribPointer(g_AttribLocationColor, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(ImDrawVert), (GLvoid*)IM_OFFSETOF(ImDrawVert, col));
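    // (the three attributes describe ImDrawVert's layout: { ImVec2 pos; ImVec2 uv; ImU32 col; }, with the 8-bit RGBA color normalized to [0,1])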

    // Draw
    ImVec2 pos = draw_data->DisplayPos;
    for (int n = 0; n < draw_data->CmdListsCount; n++)
    {
        const ImDrawList* cmd_list = draw_data->CmdLists[n];
        const ImDrawIdx* idx_buffer_offset = 0;

        glBindBuffer(GL_ARRAY_BUFFER, g_VboHandle);
        glBufferData(GL_ARRAY_BUFFER, (GLsizeiptr)cmd_list->VtxBuffer.Size * sizeof(ImDrawVert), (const GLvoid*)cmd_list->VtxBuffer.Data, GL_STREAM_DRAW);

        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_ElementsHandle);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, (GLsizeiptr)cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx), (const GLvoid*)cmd_list->IdxBuffer.Data, GL_STREAM_DRAW);

        for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
        {
            const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
            if (pcmd->UserCallback)
            {
                // User callback (registered via ImDrawList::AddCallback)
                pcmd->UserCallback(cmd_list, pcmd);
            }
            else
            {
                ImVec4 clip_rect = ImVec4(pcmd->ClipRect.x - pos.x, pcmd->ClipRect.y - pos.y, pcmd->ClipRect.z - pos.x, pcmd->ClipRect.w - pos.y);
                if (clip_rect.x < fb_width && clip_rect.y < fb_height && clip_rect.z >= 0.0f && clip_rect.w >= 0.0f)
                {
                    // Apply scissor/clipping rectangle
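                    // (glScissor uses a lower-left origin, hence the fb_height - clip_rect.w flip)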
                    glScissor((int)clip_rect.x, (int)(fb_height - clip_rect.w), (int)(clip_rect.z - clip_rect.x), (int)(clip_rect.w - clip_rect.y));

                    // Bind texture, Draw
                    glBindTexture(GL_TEXTURE_2D, (GLuint)(intptr_t)pcmd->TextureId);
                    glDrawElements(GL_TRIANGLES, (GLsizei)pcmd->ElemCount, sizeof(ImDrawIdx) == 2 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT, idx_buffer_offset);
                }
            }
            idx_buffer_offset += pcmd->ElemCount;
        }
    }
    glDeleteVertexArrays(1, &vao_handle);

    // Restore modified GL state
    glUseProgram(last_program);
    glBindTexture(GL_TEXTURE_2D, last_texture);
#ifdef GL_SAMPLER_BINDING
    glBindSampler(0, last_sampler);
#endif
    glActiveTexture(last_active_texture);
    glBindVertexArray(last_vertex_array);
    glBindBuffer(GL_ARRAY_BUFFER, last_array_buffer);
    glBlendEquationSeparate(last_blend_equation_rgb, last_blend_equation_alpha);
    glBlendFuncSeparate(last_blend_src_rgb, last_blend_dst_rgb, last_blend_src_alpha, last_blend_dst_alpha);
    if (last_enable_blend) glEnable(GL_BLEND); else glDisable(GL_BLEND);
    if (last_enable_cull_face) glEnable(GL_CULL_FACE); else glDisable(GL_CULL_FACE);
    if (last_enable_depth_test) glEnable(GL_DEPTH_TEST); else glDisable(GL_DEPTH_TEST);
    if (last_enable_scissor_test) glEnable(GL_SCISSOR_TEST); else glDisable(GL_SCISSOR_TEST);
#ifdef GL_POLYGON_MODE
    glPolygonMode(GL_FRONT_AND_BACK, (GLenum)last_polygon_mode[0]);
#endif
    glViewport(last_viewport[0], last_viewport[1], (GLsizei)last_viewport[2], (GLsizei)last_viewport[3]);
    glScissor(last_scissor_box[0], last_scissor_box[1], (GLsizei)last_scissor_box[2], (GLsizei)last_scissor_box[3]);
}

bool ImGui_ImplOpenGL3_CreateFontsTexture()
{
    // Build texture atlas
    ImGuiIO& io = ImGui::GetIO();
    unsigned char* pixels;
    int width, height;
    io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);   // Load as RGBA 32-bits (75% of the memory is wasted, but default font is so small) because it is more likely to be compatible with user's existing shaders. If your ImTextureId represents a higher-level concept than just a GL texture id, consider calling GetTexDataAsAlpha8() instead to save on GPU memory.

    // Upload texture to graphics system
    GLint last_texture;
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &last_texture);
    glGenTextures(1, &g_FontTexture);
    glBindTexture(GL_TEXTURE_2D, g_FontTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);

    // Store our identifier
    io.Fonts->TexID = (ImTextureID)(intptr_t)g_FontTexture;

    // Restore state
    glBindTexture(GL_TEXTURE_2D, last_texture);

    return true;
}

void ImGui_ImplOpenGL3_DestroyFontsTexture()
{
    if (g_FontTexture)
    {
        ImGuiIO& io = ImGui::GetIO();
        glDeleteTextures(1, &g_FontTexture);
        io.Fonts->TexID = 0;
        g_FontTexture = 0;
    }
}

// If you get an error please report on GitHub. You may try a different GL context version or GLSL version.
static bool CheckShader(GLuint handle, const char* desc)
{
    GLint status = 0, log_length = 0;
    glGetShaderiv(handle, GL_COMPILE_STATUS, &status);
    glGetShaderiv(handle, GL_INFO_LOG_LENGTH, &log_length);
    if ((GLboolean)status == GL_FALSE)
        fprintf(stderr, "ERROR: ImGui_ImplOpenGL3_CreateDeviceObjects: failed to compile %s!\n", desc);
    if (log_length > 0)
    {
        ImVector<char> buf;
        buf.resize((int)(log_length + 1));
        glGetShaderInfoLog(handle, log_length, NULL, (GLchar*)buf.begin());
        fprintf(stderr, "%s\n", buf.begin());
    }
    return (GLboolean)status == GL_TRUE;
}

// If you get an error please report on GitHub. You may try a different GL context version or GLSL version.
static bool CheckProgram(GLuint handle, const char* desc)
{
    GLint status = 0, log_length = 0;
    glGetProgramiv(handle, GL_LINK_STATUS, &status);
    glGetProgramiv(handle, GL_INFO_LOG_LENGTH, &log_length);
    if ((GLboolean)status == GL_FALSE)
        fprintf(stderr, "ERROR: ImGui_ImplOpenGL3_CreateDeviceObjects: failed to link %s!\n", desc);
    if (log_length > 0)
    {
        ImVector<char> buf;
        buf.resize((int)(log_length + 1));
        glGetProgramInfoLog(handle, log_length, NULL, (GLchar*)buf.begin());
        fprintf(stderr, "%s\n", buf.begin());
    }
    return (GLboolean)status == GL_TRUE;
}

bool    ImGui_ImplOpenGL3_CreateDeviceObjects()
{
    // Backup GL state
    GLint last_texture, last_array_buffer, last_vertex_array;
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &last_texture);
    glGetIntegerv(GL_ARRAY_BUFFER_BINDING, &last_array_buffer);
    glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &last_vertex_array);

    // Parse GLSL version string
    int glsl_version = 130;
    sscanf(g_GlslVersionString, "#version %d", &glsl_version);

    const GLchar* vertex_shader_glsl_120 =
        "uniform mat4 ProjMtx;\n"
        "attribute vec2 Position;\n"
        "attribute vec2 UV;\n"
        "attribute vec4 Color;\n"
        "varying vec2 Frag_UV;\n"
        "varying vec4 Frag_Color;\n"
        "void main()\n"
        "{\n"
        "    Frag_UV = UV;\n"
        "    Frag_Color = Color;\n"
        "    gl_Position = ProjMtx * vec4(Position.xy,0,1);\n"
        "}\n";

    const GLchar* vertex_shader_glsl_130 =
        "uniform mat4 ProjMtx;\n"
        "in vec2 Position;\n"
        "in vec2 UV;\n"
        "in vec4 Color;\n"
        "out vec2 Frag_UV;\n"
        "out vec4 Frag_Color;\n"
        "void main()\n"
        "{\n"
        "    Frag_UV = UV;\n"
        "    Frag_Color = Color;\n"
        "    gl_Position = ProjMtx * vec4(Position.xy,0,1);\n"
        "}\n";

    const GLchar* vertex_shader_glsl_300_es =
        "precision mediump float;\n"
        "layout (location = 0) in vec2 Position;\n"
        "layout (location = 1) in vec2 UV;\n"
        "layout (location = 2) in vec4 Color;\n"
        "uniform mat4 ProjMtx;\n"
        "out vec2 Frag_UV;\n"
        "out vec4 Frag_Color;\n"
        "void main()\n"
        "{\n"
        "    Frag_UV = UV;\n"
        "    Frag_Color = Color;\n"
        "    gl_Position = ProjMtx * vec4(Position.xy,0,1);\n"
        "}\n";

    const GLchar* vertex_shader_glsl_410_core =
        "layout (location = 0) in vec2 Position;\n"
        "layout (location = 1) in vec2 UV;\n"
        "layout (location = 2) in vec4 Color;\n"
        "uniform mat4 ProjMtx;\n"
        "out vec2 Frag_UV;\n"
        "out vec4 Frag_Color;\n"
        "void main()\n"
        "{\n"
        "    Frag_UV = UV;\n"
        "    Frag_Color = Color;\n"
        "    gl_Position = ProjMtx * vec4(Position.xy,0,1);\n"
        "}\n";

    const GLchar* fragment_shader_glsl_120 =
        "#ifdef GL_ES\n"
        "    precision mediump float;\n"
        "#endif\n"
        "uniform sampler2D Texture;\n"
        "varying vec2 Frag_UV;\n"
        "varying vec4 Frag_Color;\n"
        "void main()\n"
        "{\n"
        "    gl_FragColor = Frag_Color * texture2D(Texture, Frag_UV.st);\n"
        "}\n";

    const GLchar* fragment_shader_glsl_130 =
        "uniform sampler2D Texture;\n"
        "in vec2 Frag_UV;\n"
        "in vec4 Frag_Color;\n"
        "out vec4 Out_Color;\n"
        "void main()\n"
        "{\n"
        "    Out_Color = Frag_Color * texture(Texture, Frag_UV.st);\n"
        "}\n";

    const GLchar* fragment_shader_glsl_300_es =
        "precision mediump float;\n"
        "uniform sampler2D Texture;\n"
        "in vec2 Frag_UV;\n"
        "in vec4 Frag_Color;\n"
        "layout (location = 0) out vec4 Out_Color;\n"
        "void main()\n"
        "{\n"
        "    Out_Color = Frag_Color * texture(Texture, Frag_UV.st);\n"
        "}\n";

    const GLchar* fragment_shader_glsl_410_core =
        "in vec2 Frag_UV;\n"
        "in vec4 Frag_Color;\n"
        "uniform sampler2D Texture;\n"
        "layout (location = 0) out vec4 Out_Color;\n"
        "void main()\n"
        "{\n"
        "    Out_Color = Frag_Color * texture(Texture, Frag_UV.st);\n"
        "}\n";

    // Select shaders matching our GLSL versions
    const GLchar* vertex_shader = NULL;
    const GLchar* fragment_shader = NULL;
    if (glsl_version < 130)
    {
        vertex_shader = vertex_shader_glsl_120;
        fragment_shader = fragment_shader_glsl_120;
    }
    else if (glsl_version == 410)
    {
        vertex_shader = vertex_shader_glsl_410_core;
        fragment_shader = fragment_shader_glsl_410_core;
    }
    else if (glsl_version == 300)
    {
        vertex_shader = vertex_shader_glsl_300_es;
        fragment_shader = fragment_shader_glsl_300_es;
    }
    else
    {
        vertex_shader = vertex_shader_glsl_130;
        fragment_shader = fragment_shader_glsl_130;
    }

    // Create shaders
    const GLchar* vertex_shader_with_version[2] = { g_GlslVersionString, vertex_shader };
    g_VertHandle = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(g_VertHandle, 2, vertex_shader_with_version, NULL);
    glCompileShader(g_VertHandle);
    CheckShader(g_VertHandle, "vertex shader");

    const GLchar* fragment_shader_with_version[2] = { g_GlslVersionString, fragment_shader };
    g_FragHandle = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(g_FragHandle, 2, fragment_shader_with_version, NULL);
    glCompileShader(g_FragHandle);
    CheckShader(g_FragHandle, "fragment shader");

    g_ShaderHandle = glCreateProgram();
    glAttachShader(g_ShaderHandle, g_VertHandle);
    glAttachShader(g_ShaderHandle, g_FragHandle);
    glLinkProgram(g_ShaderHandle);
    CheckProgram(g_ShaderHandle, "shader program");

    g_AttribLocationTex = glGetUniformLocation(g_ShaderHandle, "Texture");
    g_AttribLocationProjMtx = glGetUniformLocation(g_ShaderHandle, "ProjMtx");
    g_AttribLocationPosition = glGetAttribLocation(g_ShaderHandle, "Position");
    g_AttribLocationUV = glGetAttribLocation(g_ShaderHandle, "UV");
    g_AttribLocationColor = glGetAttribLocation(g_ShaderHandle, "Color");

    // Create buffers
    glGenBuffers(1, &g_VboHandle);
    glGenBuffers(1, &g_ElementsHandle);

    ImGui_ImplOpenGL3_CreateFontsTexture();

    // Restore modified GL state
    glBindTexture(GL_TEXTURE_2D, last_texture);
    glBindBuffer(GL_ARRAY_BUFFER, last_array_buffer);
    glBindVertexArray(last_vertex_array);

    return true;
}

void    ImGui_ImplOpenGL3_DestroyDeviceObjects()
{
    if (g_VboHandle) glDeleteBuffers(1, &g_VboHandle);
    if (g_ElementsHandle) glDeleteBuffers(1, &g_ElementsHandle);
    g_VboHandle = g_ElementsHandle = 0;

    if (g_ShaderHandle && g_VertHandle) glDetachShader(g_ShaderHandle, g_VertHandle);
    if (g_VertHandle) glDeleteShader(g_VertHandle);
    g_VertHandle = 0;

    if (g_ShaderHandle && g_FragHandle) glDetachShader(g_ShaderHandle, g_FragHandle);
    if (g_FragHandle) glDeleteShader(g_FragHandle);
    g_FragHandle = 0;

    if (g_ShaderHandle) glDeleteProgram(g_ShaderHandle);
    g_ShaderHandle = 0;

    ImGui_ImplOpenGL3_DestroyFontsTexture();
}
/******************************************************************************
 * arch/x86/shadow.c
 *
 * Copyright (c) 2005 Michael A Fetterman
 * Based on an earlier implementation by Ian Pratt et al
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * Jun Nakajima <jun.nakajima@intel.com>
 * Chengyuan Li <chengyuan.li@intel.com>
 *
 * Extended to support 32-bit PAE and 64-bit guests.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/domain_page.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/trace.h>
#include <asm/shadow_64.h>

/* Use this to have the compiler remove unnecessary branches */
#define SH_L1_HAS_NEXT_PAGE (GUEST_L1_PAGETABLE_ENTRIES - L1_PAGETABLE_ENTRIES)
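/* (this evaluates to 0 when the guest and host L1s have the same number of
 * entries, so "if ( SH_L1_HAS_NEXT_PAGE && ... )" paths compile away in that
 * configuration) */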

extern void free_shadow_pages(struct domain *d);

#if 0 // this code has not been updated for 32pae & 64 bit modes
#if SHADOW_DEBUG
static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
#endif
#endif

#if CONFIG_PAGING_LEVELS == 3
static unsigned long shadow_l3_table(
    struct domain *d, unsigned long gpfn, unsigned long gmfn);
#endif

#if CONFIG_PAGING_LEVELS == 4
static unsigned long shadow_l4_table(
    struct domain *d, unsigned long gpfn, unsigned long gmfn);
#endif

#if CONFIG_PAGING_LEVELS >= 3
static void shadow_map_into_current(struct vcpu *v,
    unsigned long va, unsigned int from, unsigned int to);
static inline void validate_bl2e_change( struct domain *d,
    guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
#endif

/********

There's a per-domain shadow table spin lock which works fine for SMP
hosts. We don't have to worry about interrupts as no shadow operations
happen in an interrupt context. It's probably not quite ready for SMP
guest operation as we have to worry about synchronisation between gpte
and spte updates. It's possible that this might only happen in a
hypercall context, in which case we'll probably have a per-domain
hypercall lock anyhow (at least initially).

********/

static inline int
shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
               unsigned long new_type)
{
    struct page_info *page = mfn_to_page(gmfn);
    int pinned = 0, okay = 1;

    if ( page_out_of_sync(page) )
    {
        // Don't know how long ago this snapshot was taken.
        // Can't trust it to be recent enough.
        //
        __shadow_sync_mfn(d, gmfn);
    }

    if ( !shadow_mode_refcounts(d) )
        return 1;

    if ( unlikely(page_is_page_table(page)) )
        return 1;

    FSH_LOG("%s: gpfn=%lx gmfn=%lx nt=%08lx", __func__, gpfn, gmfn, new_type);

    if ( !shadow_remove_all_write_access(d, gpfn, gmfn) )
    {
        FSH_LOG("%s: couldn't find/remove all write accesses, gpfn=%lx gmfn=%lx",
                __func__, gpfn, gmfn);
#if 1 || defined(LIVE_DANGEROUSLY)
        set_bit(_PGC_page_table, &page->count_info);
        return 1;
#endif
        return 0;
    }

    // To convert this page to use as a page table, the writable count
    // should now be zero.  Test this by grabbing the page as a page table,
    // and then immediately releasing.  This will also deal with any
    // necessary TLB flushing issues for us.
    //
    // The cruft here about pinning doesn't really work right.  This
    // needs rethinking/rewriting...  Need to gracefully deal with the
    // TLB flushes required when promoting a writable page, and also deal
    // with any outstanding (external) writable refs to this page (by
    // refusing to promote it).  The pinning headache complicates this
    // code -- it would all get much simpler if we stop using
    // shadow_lock() and move the shadow code to BIGLOCK().
    //
    if ( unlikely(!get_page(page, d)) )
        BUG(); // XXX -- needs more thought for a graceful failure
    if ( unlikely(test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info)) )
    {
        pinned = 1;
        put_page_and_type(page);
    }
    if ( get_page_type(page, PGT_base_page_table) )
    {
        set_bit(_PGC_page_table, &page->count_info);
        put_page_type(page);
    }
    else
    {
        printk("shadow_promote: get_page_type failed "
               "dom%d gpfn=%lx gmfn=%lx t=%08lx\n",
               d->domain_id, gpfn, gmfn, new_type);
        okay = 0;
    }

    // Now put the type back to writable...
    if ( unlikely(!get_page_type(page, PGT_writable_page)) )
        BUG(); // XXX -- needs more thought for a graceful failure
    if ( unlikely(pinned) )
    {
        if ( unlikely(test_and_set_bit(_PGT_pinned,
                                       &page->u.inuse.type_info)) )
            BUG(); // hmm... someone pinned this again?
    }
    else
        put_page_and_type(page);

    return okay;
}


/*
 * Things in shadow mode that collect get_page() refs to the domain's
 * pages are:
 * - PGC_allocated takes a gen count, just like normal.
 * - A writable page can be pinned (paravirtualized guests may consider
 *   these pages to be L1s or L2s, and don't know the difference).
 *   Pinning a page takes a gen count (but, for domains in shadow mode,
 *   it *doesn't* take a type count)
 * - CR3 grabs a ref to whatever it points at, just like normal.
 * - Shadow mode grabs an initial gen count for itself, as a placeholder
 *   for whatever references will exist.
 * - Shadow PTEs that point to a page take a gen count, just like regular
 *   PTEs.  However, they don't get a type count, as get_page_type() is
 *   hardwired to keep writable pages' counts at 1 for domains in shadow
 *   mode.
 * - Whenever we shadow a page, the entry in the shadow hash grabs a
 *   general ref to the page.
 * - Whenever a page goes out of sync, the out of sync entry grabs a
 *   general ref to the page.
 */
/*
 * page_info fields for pages allocated as shadow pages:
 *
 * All 32 bits of count_info are a simple count of refs to this shadow
 * from a) other shadow pages, b) current CR3's (aka ed->arch.shadow_table),
 * c) if it's a pinned shadow root pgtable, d) outstanding out-of-sync
 * references.
 *
 * u.inuse._domain is left NULL, to prevent accidentally allowing some random
 * domain to gain permissions to map this page.
 *
 * u.inuse.type_info & PGT_type_mask remembers what kind of page is being
 * shadowed.
 * u.inuse.type_info & PGT_mfn_mask holds the mfn of the page being shadowed.
 * u.inuse.type_info & PGT_pinned says that an extra reference to this shadow
 * currently exists because this is a shadow of a root page, and we don't
 * want to let it disappear just because no CR3 is currently pointing at it.
 *
 * tlbflush_timestamp holds a min & max index of valid page table entries
 * within the shadow page.
 */

static inline unsigned long
alloc_shadow_page(struct domain *d,
                  unsigned long gpfn, unsigned long gmfn,
                  u32 psh_type)
{
    struct page_info *page;
    unsigned long smfn;
    int pin = 0;
    void *l1, *lp;

    // Currently, we only keep pre-zero'ed pages around for use as L1's...
    // This will change.  Soon.
    //
    if ( psh_type == PGT_l1_shadow )
    {
        if ( !list_empty(&d->arch.free_shadow_frames) )
        {
            struct list_head *entry = d->arch.free_shadow_frames.next;
            page = list_entry(entry, struct page_info, list);
            list_del(entry);
            perfc_decr(free_l1_pages);
        }
        else
        {
            if ( SH_L1_HAS_NEXT_PAGE &&
                 d->arch.ops->guest_paging_levels == PAGING_L2)
            {
#if CONFIG_PAGING_LEVELS >= 3
                /* 
                 * For a 32-bit HVM guest, 2 shadow L1s are required to
                 * simulate 1 guest L1, so we need to allocate 2 shadow L1
                 * pages each time.
                 *
                 * --> Need to avoid alloc_domheap_pages.
                 */
                page = alloc_domheap_pages(NULL, SL1_ORDER, 0);
                if (!page)
                    goto no_shadow_page;

                l1 = map_domain_page(page_to_mfn(page));
                memset(l1, 0, PAGE_SIZE);
                unmap_domain_page(l1);

                l1 = map_domain_page(page_to_mfn(page + 1));
                memset(l1, 0, PAGE_SIZE);
                unmap_domain_page(l1);
#else
                page = alloc_domheap_page(NULL);
                if (!page)
                    goto no_shadow_page;

                l1 = map_domain_page(page_to_mfn(page));
                memset(l1, 0, PAGE_SIZE);
                unmap_domain_page(l1);
#endif
            }
            else
            {
                page = alloc_domheap_page(NULL);
                if (!page)
                    goto no_shadow_page;

                l1 = map_domain_page(page_to_mfn(page));
                memset(l1, 0, PAGE_SIZE);
                unmap_domain_page(l1);
            }
        }
    }
    else {
#if CONFIG_PAGING_LEVELS == 2
        page = alloc_domheap_page(NULL);
#elif CONFIG_PAGING_LEVELS >= 3
        if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
             psh_type == PGT_l4_shadow )      /* allocated for PAE PDP page */
            page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
        else if ( d->arch.ops->guest_paging_levels == PAGING_L3 &&
                  psh_type == PGT_l3_shadow ) /* allocated for PAE PDP page */
            page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
        else
            page = alloc_domheap_page(NULL);
#endif
        if (!page)
            goto no_shadow_page;

        lp = map_domain_page(page_to_mfn(page));
        memset(lp, 0, PAGE_SIZE);
        unmap_domain_page(lp);
    }

    smfn = page_to_mfn(page);

    ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
    page->u.inuse.type_info = psh_type | gmfn;
    page->count_info = 0;
    page->tlbflush_timestamp = 0;

    switch ( psh_type )
    {
    case PGT_l1_shadow:
        if ( !shadow_promote(d, gpfn, gmfn, psh_type) )
            goto fail;
        perfc_incr(shadow_l1_pages);
        d->arch.shadow_page_count++;
        break;

    case PGT_l2_shadow:
        if ( !shadow_promote(d, gpfn, gmfn, psh_type) )
            goto fail;
        perfc_incr(shadow_l2_pages);
        d->arch.shadow_page_count++;
        if ( PGT_l2_page_table == PGT_root_page_table )
            pin = 1;

        break;

    case PGT_l3_shadow:
        if ( !shadow_promote(d, gpfn, gmfn, psh_type) )
            goto fail;
        perfc_incr(shadow_l3_pages);
        d->arch.shadow_page_count++;
        if ( PGT_l3_page_table == PGT_root_page_table )
            pin = 1;
        break;

    case PGT_l4_shadow:
        if ( !shadow_promote(d, gpfn, gmfn, psh_type) )
            goto fail;
        perfc_incr(shadow_l4_pages);
        d->arch.shadow_page_count++;
        if ( PGT_l4_page_table == PGT_root_page_table )
            pin = 1;
        break;

#if CONFIG_PAGING_LEVELS >= 4
    case PGT_fl1_shadow:
        perfc_incr(shadow_l1_pages);
        d->arch.shadow_page_count++;
        break;
#else

    case PGT_hl2_shadow:
        // Treat an hl2 as an L1 for purposes of promotion.
        // For external mode domains, treat them as an L2 for purposes of
        // pinning.
        //
        if ( !shadow_promote(d, gpfn, gmfn, PGT_l1_shadow) )
            goto fail;
        perfc_incr(hl2_table_pages);
        d->arch.hl2_page_count++;
        if ( shadow_mode_external(d) &&
             (PGT_l2_page_table == PGT_root_page_table) )
            pin = 1;

        break;
#endif
    case PGT_snapshot:
        perfc_incr(snapshot_pages);
        d->arch.snapshot_page_count++;
        break;

    default:
        printk("Alloc shadow weird page type type=%08x\n", psh_type);
        BUG();
        break;
    }

    // Don't add a new shadow of something that already has a snapshot.
    //
    ASSERT( (psh_type == PGT_snapshot) || !mfn_out_of_sync(gmfn) );

    set_shadow_status(d, gpfn, gmfn, smfn, psh_type);

    if ( pin )
        shadow_pin(smfn);

    return smfn;

fail:
    FSH_LOG("promotion of pfn=%lx mfn=%lx failed!  external gnttab refs?",
            gpfn, gmfn);
    if (psh_type == PGT_l1_shadow)
    {
        if (d->arch.ops->guest_paging_levels == PAGING_L2)
        {
#if CONFIG_PAGING_LEVELS >=3
            free_domheap_pages(page, SL1_ORDER);
#else
            free_domheap_page(page);
#endif
        }
        else
            free_domheap_page(page);
    }
    else
        free_domheap_page(page);

    return 0;

no_shadow_page:
    ASSERT(page == NULL);
    printk("Couldn't alloc shadow page! dom%d count=%d\n",
           d->domain_id, d->arch.shadow_page_count);
    printk("Shadow table counts: l1=%d l2=%d hl2=%d snapshot=%d\n",
           perfc_value(shadow_l1_pages),
           perfc_value(shadow_l2_pages),
           perfc_value(hl2_table_pages),
           perfc_value(snapshot_pages));
    BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */

    return 0;
}

#if CONFIG_PAGING_LEVELS == 2
static unsigned long
shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn,
                unsigned long smfn)
{
    unsigned long hl2mfn;
    l1_pgentry_t *hl2;
    int limit;

    ASSERT(PGT_base_page_table == PGT_l2_page_table);

    if ( unlikely(!(hl2mfn = alloc_shadow_page(d, gpfn, gmfn, PGT_hl2_shadow))) )
    {
        printk("Couldn't alloc an HL2 shadow for pfn=%lx mfn=%lx\n",
               gpfn, gmfn);
        BUG(); /* XXX Deal gracefully with failure. */
    }

    SH_VVLOG("shadow_hl2_table(gpfn=%lx, gmfn=%lx, smfn=%lx) => %lx",
             gpfn, gmfn, smfn, hl2mfn);
    perfc_incrc(shadow_hl2_table_count);

    hl2 = map_domain_page(hl2mfn);

    if ( shadow_mode_external(d) )
        limit = L2_PAGETABLE_ENTRIES;
    else
        limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;

    memset(hl2, 0, limit * sizeof(l1_pgentry_t));

    if ( !shadow_mode_external(d) )
    {
        memset(&hl2[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 0,
               HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));

        // Setup easy access to the GL2, SL2, and HL2 frames.
        //
        hl2[l2_table_offset(LINEAR_PT_VIRT_START)] =
            l1e_from_pfn(gmfn, __PAGE_HYPERVISOR);
        hl2[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
            l1e_from_pfn(smfn, __PAGE_HYPERVISOR);
        hl2[l2_table_offset(PERDOMAIN_VIRT_START)] =
            l1e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
    }

    unmap_domain_page(hl2);

    return hl2mfn;
}

/*
 * This could take and use a snapshot, and validate the entire page at
 * once, or it could continue to fault in entries one at a time...
 * Might be worth investigating...
 */
static unsigned long shadow_l2_table(
    struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
    unsigned long smfn;
    l2_pgentry_t *spl2e;
    int i;

    SH_VVLOG("shadow_l2_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);

    perfc_incrc(shadow_l2_table_count);

    if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l2_shadow))) )
    {
        printk("Couldn't alloc an L2 shadow for pfn=%lx mfn=%lx\n",
               gpfn, gmfn);
        BUG(); /* XXX Deal gracefully with failure. */
    }

    spl2e = (l2_pgentry_t *)map_domain_page(smfn);

    /* Install hypervisor and 2x linear p.t. mappings. */
    if ( (PGT_base_page_table == PGT_l2_page_table) &&
         !shadow_mode_external(d) )
    {
        /*
         * We could proactively fill in PDEs for pages that are already
         * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
         * (restriction required for coherence of the accessed bit). However,
         * we tried it and it didn't help performance. This is simpler.
         */
        memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));

        /* Install hypervisor and 2x linear p.t. mappings. */
        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
               &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
               HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));

        spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
            l2e_from_pfn(smfn, __PAGE_HYPERVISOR);

        for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
            spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
                l2e_from_page(virt_to_page(page_get_owner(mfn_to_page(gmfn))->
                                           arch.mm_perdomain_pt) + i,
                              __PAGE_HYPERVISOR);

        if ( shadow_mode_translate(d) ) // NB: not external
        {
            unsigned long hl2mfn;

            spl2e[l2_table_offset(RO_MPT_VIRT_START)] =
                l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
                                __PAGE_HYPERVISOR);

            if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
                hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);

            // shadow_mode_translate (but not external) sl2 tables hold a
            // ref to their hl2.
            //
            if ( !get_shadow_ref(hl2mfn) )
                BUG();

            spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
                l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
        }
        else
            spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
                l2e_from_pfn(gmfn, __PAGE_HYPERVISOR);
    }
    else
    {
        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));
    }

    unmap_domain_page(spl2e);

    SH_VLOG("shadow_l2_table(%lx -> %lx)", gmfn, smfn);
    return smfn;
}
#endif /* CONFIG_PAGING_LEVELS == 2 */

static void shadow_map_l1_into_current_l2(unsigned long va)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    l1_pgentry_t *spl1e, *spl1e_next = 0;
    l2_pgentry_t sl2e;
    guest_l1_pgentry_t *gpl1e;
    guest_l2_pgentry_t gl2e = {0};
    unsigned long gl1pfn, gl1mfn, sl1mfn;
    int i, init_table = 0;

    __guest_get_l2e(v, va, &gl2e);
    ASSERT(guest_l2e_get_flags(gl2e) & _PAGE_PRESENT);
    gl1pfn = l2e_get_pfn(gl2e);

    if ( !(sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow)) )
    {
        /* This L1 is NOT already shadowed so we need to shadow it. */
        SH_VVLOG("4a: l1 not shadowed");

        gl1mfn = gmfn_to_mfn(d, gl1pfn);
        if ( unlikely(!VALID_MFN(gl1mfn)) )
        {
            // Attempt to use an invalid pfn as an L1 page.
            // XXX this needs to be more graceful!
            BUG();
        }

        if ( unlikely(!(sl1mfn =
                        alloc_shadow_page(d, gl1pfn, gl1mfn, PGT_l1_shadow))) )
        {
            printk("Couldn't alloc an L1 shadow for pfn=%lx mfn=%lx\n",
                   gl1pfn, gl1mfn);
            BUG(); /* XXX Need to deal gracefully with failure. */
        }

        perfc_incrc(shadow_l1_table_count);
        init_table = 1;
    }
    else
    {
        /* This L1 is shadowed already, but the L2 entry is missing. */
        SH_VVLOG("4b: was shadowed, l2 missing (%lx)", sl1mfn);
    }

#ifndef NDEBUG
    {
        l2_pgentry_t old_sl2e;
        __shadow_get_l2e(v, va, &old_sl2e);
        ASSERT(!(l2e_get_flags(old_sl2e) & _PAGE_PRESENT));
    }
#endif

#if CONFIG_PAGING_LEVELS >= 3
    if ( SH_L1_HAS_NEXT_PAGE && 
         d->arch.ops->guest_paging_levels == PAGING_L2 )
    {
        /* For a 32-bit HVM guest on a 64-bit or PAE host, we need to
         * update two L2 entries each time
         */
        if ( !get_shadow_ref(sl1mfn))
            BUG();
        l2pde_general(d, &gl2e, &sl2e, sl1mfn);
        __guest_set_l2e(v, va, &gl2e);
        __shadow_set_l2e(v, va & ~((1<<L2_PAGETABLE_SHIFT_32) - 1), &sl2e);
        if ( !get_shadow_ref(sl1mfn+1))
            BUG();
        sl2e = l2e_empty();
        l2pde_general(d, &gl2e, &sl2e, sl1mfn+1);
        __shadow_set_l2e(v,((va & ~((1<<L2_PAGETABLE_SHIFT_32) - 1)) + (1 << L2_PAGETABLE_SHIFT)) , &sl2e);
    } else
#endif
    {
        if ( !get_shadow_ref(sl1mfn) )
            BUG();
        l2pde_general(d, &gl2e, &sl2e, sl1mfn);
        __guest_set_l2e(v, va, &gl2e);
        __shadow_set_l2e(v, va , &sl2e);
    }

    if ( init_table )
    {
        l1_pgentry_t sl1e;
        int index = guest_l1_table_offset(va);
        int min = 1, max = 0;

        unsigned long tmp_gmfn;
        l2_pgentry_t tmp_sl2e = {0};
        guest_l2_pgentry_t tmp_gl2e = {0};

        __guest_get_l2e(v, va, &tmp_gl2e);
        tmp_gmfn = gmfn_to_mfn(d, l2e_get_pfn(tmp_gl2e));
        gpl1e = (guest_l1_pgentry_t *) map_domain_page(tmp_gmfn);

        /* If the PGT_l1_shadow has two contiguous pages */
#if CONFIG_PAGING_LEVELS >= 3
        if ( SH_L1_HAS_NEXT_PAGE &&
             d->arch.ops->guest_paging_levels == PAGING_L2 )
            __shadow_get_l2e(v,  va & ~((1UL << L2_PAGETABLE_SHIFT_32) - 1), &tmp_sl2e);
        else
#endif
        __shadow_get_l2e(v, va, &tmp_sl2e);

        spl1e = (l1_pgentry_t *) map_domain_page(l2e_get_pfn(tmp_sl2e));

        if ( SH_L1_HAS_NEXT_PAGE )
            spl1e_next = (l1_pgentry_t *) map_domain_page(
                (l2e_get_pfn(tmp_sl2e) + 1UL));

        for ( i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++ )
        {
            l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
            if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
                 unlikely(!shadow_get_page_from_l1e(sl1e, d)) )
                sl1e = l1e_empty();
            if ( l1e_get_flags(sl1e) == 0 )
            {
                // First copy entries from 0 until first invalid.
                // Then copy entries from index until first invalid.
                //
                if ( i < index ) {
                    i = index - 1;
                    continue;
                }
                break;
            }

            if ( SH_L1_HAS_NEXT_PAGE && i >= L1_PAGETABLE_ENTRIES )
                spl1e_next[i - L1_PAGETABLE_ENTRIES] = sl1e;
            else 
                spl1e[i] = sl1e;

            if ( unlikely(i < min) )
                min = i;
            if ( likely(i > max) )
                max = i;
            set_guest_back_ptr(d, sl1e, sl1mfn, i);
        }

        mfn_to_page(sl1mfn)->tlbflush_timestamp =
            SHADOW_ENCODE_MIN_MAX(min, max);

        unmap_domain_page(gpl1e);
        unmap_domain_page(spl1e);

        if ( SH_L1_HAS_NEXT_PAGE )
            unmap_domain_page(spl1e_next);
    }
}

#if CONFIG_PAGING_LEVELS == 2
static void
shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    l2_pgentry_t sl2e = {0};

    __shadow_get_l2e(v, va, &sl2e);
    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
    {
        /*
         * Either the L1 is not shadowed, or the shadow isn't linked into
         * the current shadow L2.
         */
        if ( create_l1_shadow )
        {
            perfc_incrc(shadow_set_l1e_force_map);
            shadow_map_l1_into_current_l2(va);
        }
        else /* check to see if it exists; if so, link it in */
        {
            l2_pgentry_t gpde = {0};
            unsigned long gl1pfn;
            unsigned long sl1mfn;

            __guest_get_l2e(v, va, &gpde);

            if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
            {
                gl1pfn = l2e_get_pfn(gpde);
                sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow);
            }
            else
            {
                // no shadow exists, so there's nothing to do.
                perfc_incrc(shadow_set_l1e_fail);
                return;
            }

            if ( sl1mfn )
            {
                perfc_incrc(shadow_set_l1e_unlinked);
                if ( !get_shadow_ref(sl1mfn) )
                    BUG();
                l2pde_general(d, (guest_l2_pgentry_t *)&gpde, &sl2e, sl1mfn);
                __guest_set_l2e(v, va, &gpde);
                __shadow_set_l2e(v, va, &sl2e);
            }
            else
            {
                // no shadow exists, so there's nothing to do.
                perfc_incrc(shadow_set_l1e_fail);
                return;
            }
        }
    }

    __shadow_get_l2e(v, va, &sl2e);

    if ( shadow_mode_refcounts(d) )
    {
        l1_pgentry_t old_spte;
        __shadow_get_l1e(v, va, &old_spte);

        // only do the ref counting if something important changed.
        //
        if ( l1e_has_changed(old_spte, new_spte, _PAGE_RW | _PAGE_PRESENT) )
        {
            if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
                 !shadow_get_page_from_l1e(new_spte, d) )
                new_spte = l1e_empty();
            if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
                shadow_put_page_from_l1e(old_spte, d);
        }
    }

    set_guest_back_ptr(d, new_spte, l2e_get_pfn(sl2e), l1_table_offset(va));
    __shadow_set_l1e(v, va, &new_spte);
    shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
}

static void shadow_invlpg_32(struct vcpu *v, unsigned long va)
{
    struct domain *d = v->domain;
    l1_pgentry_t gpte, spte;

    ASSERT(shadow_mode_enabled(d));

    shadow_lock(d);

    __shadow_sync_va(v, va);

    // XXX mafetter: will need to think about 4MB pages...

    // It's not strictly necessary to update the shadow here,
    // but it might save a fault later.
    //
    /*if (__copy_from_user(&gpte, &linear_pg_table[va >> PAGE_SHIFT],
                         sizeof(gpte))) {*/
    if (unlikely(!__guest_get_l1e(v, va, &gpte))) {
        perfc_incrc(shadow_invlpg_faults);
        shadow_unlock(d);
        return;
    }
    l1pte_propagate_from_guest(d, gpte, &spte);
    shadow_set_l1e(va, spte, 1);

    shadow_unlock(d);
}
#endif /* CONFIG_PAGING_LEVELS == 2 */

#if CONFIG_PAGING_LEVELS >= 3
static void shadow_set_l1e_64(
    unsigned long va, pgentry_64_t *sl1e_p,
    int create_l1_shadow)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    pgentry_64_t sle = { 0 };
    pgentry_64_t sle_up = {0};
    l1_pgentry_t old_spte;
    l1_pgentry_t sl1e = *(l1_pgentry_t *)sl1e_p;
    int i;
    unsigned long orig_va = 0;

    if ( d->arch.ops->guest_paging_levels == PAGING_L2 ) 
    {
        /* This is for 32-bit VMX guest on 64-bit host */
        orig_va = va;
        va = va & (~((1<<L2_PAGETABLE_SHIFT_32)-1));
    }

    for ( i = PAGING_L4; i >= PAGING_L2; i-- )
    {
        if ( !__rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i) )
        {
            sl1e = l1e_empty();
            goto out;
        }
        if ( !(entry_get_flags(sle) & _PAGE_PRESENT) )
        {
            if ( create_l1_shadow )
            {
                perfc_incrc(shadow_set_l3e_force_map);
                shadow_map_into_current(v, va, i-1, i);
                __rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i);
            }
        }
        if ( i < PAGING_L4 )
            shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
        sle_up = sle;
    }

    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
    {
        va = orig_va;
    }

    if ( shadow_mode_refcounts(d) )
    {
        __shadow_get_l1e(v, va, &old_spte);
        if ( l1e_has_changed(old_spte, sl1e, _PAGE_RW | _PAGE_PRESENT) )
        {
            if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
                 !shadow_get_page_from_l1e(sl1e, d) )
                sl1e = l1e_empty();
            if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
                put_page_from_l1e(old_spte, d);
        }
    }

out:
    __shadow_set_l1e(v, va, &sl1e);

    shadow_update_min_max(entry_get_pfn(sle_up), guest_l1_table_offset(va));
}
#endif /* CONFIG_PAGING_LEVELS >= 3 */

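/*
 * Pop an out_of_sync_entry from the domain's free list, growing the pool
 * by out_of_sync_extra_size entries whenever the list runs dry.
 */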
static struct out_of_sync_entry *
shadow_alloc_oos_entry(struct domain *d)
{
    struct out_of_sync_entry *f, *extra;
    unsigned size, i;

    if ( unlikely(d->arch.out_of_sync_free == NULL) )
    {
        FSH_LOG("Allocate more fullshadow tuple blocks.");

        size = sizeof(void *) + (out_of_sync_extra_size * sizeof(*f));
        extra = xmalloc_bytes(size);

        /* XXX Should be more graceful here. */
        if ( extra == NULL )
            BUG();

        memset(extra, 0, size);

        /* Record the allocation block so it can be correctly freed later. */
        d->arch.out_of_sync_extras_count++;
        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) =
            d->arch.out_of_sync_extras;
        d->arch.out_of_sync_extras = &extra[0];

        /* Thread a free chain through the newly-allocated nodes. */
        for ( i = 0; i < (out_of_sync_extra_size - 1); i++ )
            extra[i].next = &extra[i+1];
        extra[i].next = NULL;

        /* Add the new nodes to the free list. */
        d->arch.out_of_sync_free = &extra[0];
    }

    /* Allocate a new node from the quicklist. */
    f = d->arch.out_of_sync_free;
    d->arch.out_of_sync_free = f->next;

    return f;
}

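/*
 * Take a snapshot of guest page @gmfn: mark it out-of-sync, allocate a
 * PGT_snapshot page and copy the relevant (SHADOW_MIN..SHADOW_MAX) range
 * of guest entries into it.  Returns the snapshot mfn, or
 * SHADOW_SNAPSHOT_ELSEWHERE if the page was already out-of-sync.
 */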
static inline unsigned long
shadow_make_snapshot(
    struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
    unsigned long smfn, sl1mfn = 0;
    void *original, *snapshot;
    u32 min_max = 0;
    int min, max, length;

    if ( test_and_set_bit(_PGC_out_of_sync, &mfn_to_page(gmfn)->count_info) )
    {
        ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
        return SHADOW_SNAPSHOT_ELSEWHERE;
    }

    perfc_incrc(shadow_make_snapshot);

    if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_snapshot))) )
    {
        printk("Couldn't alloc fullshadow snapshot for pfn=%lx mfn=%lx!\n"
               "Dom%d snapshot_count_count=%d\n",
               gpfn, gmfn, d->domain_id, d->arch.snapshot_page_count);
        BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
    }

    if ( !get_shadow_ref(smfn) )
        BUG();

    if ( shadow_mode_refcounts(d) &&
         (shadow_max_pgtable_type(d, gpfn, &sl1mfn) == PGT_l1_shadow) )
        min_max = mfn_to_page(sl1mfn)->tlbflush_timestamp;
    mfn_to_page(smfn)->tlbflush_timestamp = min_max;

    min = SHADOW_MIN(min_max);
    max = SHADOW_MAX(min_max);
    length = max - min + 1;
    perfc_incr_histo(snapshot_copies, length, PT_UPDATES);

    min *= sizeof(guest_l1_pgentry_t);
    length *= sizeof(guest_l1_pgentry_t);

    original = map_domain_page(gmfn);
    snapshot = map_domain_page(smfn);
    memcpy(snapshot + min, original + min, length);
    unmap_domain_page(original);
    unmap_domain_page(snapshot);

    return smfn;
}

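/*
 * Allocate and initialise an out-of-sync entry for (@gpfn, @mfn), taking
 * a general reference on the page to represent its presence on the
 * out-of-sync list.  The caller takes the snapshot and links the entry
 * onto d->arch.out_of_sync.
 */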
static struct out_of_sync_entry *
__mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
                             unsigned long mfn)
{
    struct domain *d = v->domain;
    struct page_info *page = mfn_to_page(mfn);
    struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);

    ASSERT(shadow_lock_is_acquired(d));
    ASSERT(mfn_valid(mfn));

#ifndef NDEBUG
    {
        u32 type = page->u.inuse.type_info & PGT_type_mask;
        if ( shadow_mode_refcounts(d) )
        {
            ASSERT(type == PGT_writable_page);
        }
        else
        {
            ASSERT(type && (type < PGT_l4_page_table));
        }
    }
#endif

    FSH_LOG("%s(gpfn=%lx, mfn=%lx) c=%08x t=%08x", __func__,
            gpfn, mfn, page->count_info, page->u.inuse.type_info);

    // XXX this will require some more thought...  Cross-domain sharing and
    //     modification of page tables?  Hmm...
    //
    if ( d != page_get_owner(page) )
        BUG();

    perfc_incrc(shadow_mark_mfn_out_of_sync_calls);

    entry->v = v;
    entry->gpfn = gpfn;
    entry->gmfn = mfn;
    entry->writable_pl1e = -1;

#if 0 // this code has not been updated for 32pae & 64 bit modes
#if SHADOW_DEBUG
    mark_shadows_as_reflecting_snapshot(d, gpfn);
#endif
#endif

    // increment guest's ref count to represent the entry in the
    // full shadow out-of-sync list.
    //
    get_page(page, d);

    return entry;
}

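/*
 * Mark an entire guest page table page out of sync: snapshot it and add
 * it to the domain's out-of-sync list, with no writable_pl1e back
 * pointer.
 */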
static struct out_of_sync_entry *
mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
                             unsigned long mfn)
{
    struct out_of_sync_entry *entry =
        __mark_mfn_out_of_sync(v, gpfn, mfn);
    struct domain *d = v->domain;

    entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
    // Add to the out-of-sync list
    //
    entry->next = d->arch.out_of_sync;
    d->arch.out_of_sync = entry;

    return entry;

}

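/*
 * Mark the guest page table page mapping @va out of sync, recording in
 * writable_pl1e the machine address of the shadow PTE for @va so that the
 * writable mapping can be revoked later.  Any missing intermediate shadow
 * levels covering @va are created first.
 */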
static void shadow_mark_va_out_of_sync(
    struct vcpu *v, unsigned long gpfn, unsigned long mfn, unsigned long va)
{
    struct out_of_sync_entry *entry =
        __mark_mfn_out_of_sync(v, gpfn, mfn);
    l2_pgentry_t sl2e;
    struct domain *d = v->domain;

#if CONFIG_PAGING_LEVELS >= 3
    {
        l4_pgentry_t sl4e;
        l3_pgentry_t sl3e;

        __shadow_get_l4e(v, va, &sl4e);
        if ( !(l4e_get_flags(sl4e) & _PAGE_PRESENT) )
            shadow_map_into_current(v, va, PAGING_L3, PAGING_L4);

        if ( !__shadow_get_l3e(v, va, &sl3e) )
            BUG();

        if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) )
            shadow_map_into_current(v, va, PAGING_L2, PAGING_L3);
    }
#endif

    // We need the address of shadow PTE that maps @va.
    // It might not exist yet.  Make sure it's there.
    //
    __shadow_get_l2e(v, va, &sl2e);
    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
    {
        // either this L1 isn't shadowed yet, or the shadow isn't linked into
        // the current L2.
        shadow_map_l1_into_current_l2(va);
        __shadow_get_l2e(v, va, &sl2e);
    }
    ASSERT(l2e_get_flags(sl2e) & _PAGE_PRESENT);

    entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
    // NB: this is stored as a machine address.
    entry->writable_pl1e =
        l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
    ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
    entry->va = va;

    // Increment shadow's page count to represent the reference
    // inherent in entry->writable_pl1e
    //
    if ( !get_shadow_ref(l2e_get_pfn(sl2e)) )
        BUG();

    // Add to the out-of-sync list
    //
    entry->next = d->arch.out_of_sync;
    d->arch.out_of_sync = entry;

    FSH_LOG("%s(va=%lx -> writable_pl1e=%lx)",
            __func__, va, entry->writable_pl1e);
}

/*
 * Returns 1 if the snapshot for @gpfn exists and its @index'th entry matches.
 * Returns 0 otherwise.
 */
static int snapshot_entry_matches(
    struct domain *d, guest_l1_pgentry_t *guest_pt,
    unsigned long gpfn, unsigned index)
{
    unsigned long smfn = __shadow_status(d, gpfn, PGT_snapshot);
    guest_l1_pgentry_t *snapshot, gpte; // could be L1s or L2s or ...
    int entries_match;

    perfc_incrc(snapshot_entry_matches_calls);

    if ( !smfn )
        return 0;

    snapshot = map_domain_page(smfn);

    if (__copy_from_user(&gpte, &guest_pt[index],
                         sizeof(gpte)))
    {
        unmap_domain_page(snapshot);
        return 0;
    }

    // This could probably be smarter, but this is sufficient for
    // our current needs.
    //
    entries_match = !guest_l1e_has_changed(gpte, snapshot[index],
                                     PAGE_FLAG_MASK);

    unmap_domain_page(snapshot);

#ifdef PERF_COUNTERS
    if ( entries_match )
        perfc_incrc(snapshot_entry_matches_true);
#endif

    return entries_match;
}

/*
 * Returns 1 if va's shadow mapping is out-of-sync.
 * Returns 0 otherwise.
 */
static int is_out_of_sync(struct vcpu *v, unsigned long va) /* __shadow_out_of_sync */
{
    struct domain *d = v->domain;
#if CONFIG_PAGING_LEVELS == 4
    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)?
                          pagetable_get_pfn(v->arch.guest_table) :
                          pagetable_get_pfn(v->arch.guest_table_user));
#else
    unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
#endif
    unsigned long l2pfn = mfn_to_gmfn(d, l2mfn);
    guest_l2_pgentry_t l2e;
    unsigned long l1pfn, l1mfn;
    guest_l1_pgentry_t *guest_pt;

    ASSERT(shadow_lock_is_acquired(d));
    ASSERT(VALID_M2P(l2pfn));

    perfc_incrc(shadow_out_of_sync_calls);

#if CONFIG_PAGING_LEVELS >= 3

#define unmap_and_return(x)                                         \
    if ( guest_pt != (guest_l1_pgentry_t *) v->arch.guest_vtable )  \
        unmap_domain_page(guest_pt);                                \
    return (x);

    if (d->arch.ops->guest_paging_levels >= PAGING_L3) 
    { 
        pgentry_64_t le;
        unsigned long gmfn;
        unsigned long gpfn;
        int i;

        gmfn = l2mfn;
        gpfn = l2pfn;
        guest_pt = (guest_l1_pgentry_t *)v->arch.guest_vtable;

        for ( i = PAGING_L4; i >= PAGING_L3; i-- ) 
        {
            if (d->arch.ops->guest_paging_levels == PAGING_L3 
                && i == PAGING_L4)
                continue;       /* skip the top-level for 3-level */

            if ( page_out_of_sync(mfn_to_page(gmfn)) &&
                 !snapshot_entry_matches(
                     d, guest_pt, gpfn, table_offset_64(va, i)) )
            {
                unmap_and_return (1);
            }

            le = entry_empty();
            __rw_entry(v, va, &le, GUEST_ENTRY | GET_ENTRY | i);

            if ( !(entry_get_flags(le) & _PAGE_PRESENT) )
            {
                unmap_and_return (0);
            }
            gpfn = entry_get_pfn(le);
            gmfn = gmfn_to_mfn(d, gpfn);
            if ( !VALID_MFN(gmfn) )
            {
                unmap_and_return (0);
            }
            if ( guest_pt != (guest_l1_pgentry_t *)v->arch.guest_vtable )
                unmap_domain_page(guest_pt);
            guest_pt = (guest_l1_pgentry_t *)map_domain_page(gmfn);
        }

        /* L2 */
        if ( page_out_of_sync(mfn_to_page(gmfn)) &&
             !snapshot_entry_matches(d, guest_pt, gpfn, l2_table_offset(va)) )
        {
            unmap_and_return (1);
        }

        if ( guest_pt != (guest_l1_pgentry_t *)v->arch.guest_vtable )
            unmap_domain_page(guest_pt);

    } 
    else
#undef unmap_and_return
#endif /* CONFIG_PAGING_LEVELS >= 3 */
    {
        if ( page_out_of_sync(mfn_to_page(l2mfn)) &&
             !snapshot_entry_matches(d, (guest_l1_pgentry_t *)v->arch.guest_vtable,
                                     l2pfn, guest_l2_table_offset(va)) )
            return 1;
    }

    __guest_get_l2e(v, va, &l2e);
    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (guest_l2e_get_flags(l2e) & _PAGE_PSE))
        return 0;

    l1pfn = l2e_get_pfn(l2e);
    l1mfn = gmfn_to_mfn(d, l1pfn);

    // If the l1 pfn is invalid, it can't be out of sync...
    if ( !VALID_MFN(l1mfn) )
        return 0;

    guest_pt = (guest_l1_pgentry_t *) map_domain_page(l1mfn);

    if ( page_out_of_sync(mfn_to_page(l1mfn)) &&
         !snapshot_entry_matches(
             d, guest_pt, l1pfn, guest_l1_table_offset(va)) ) 
    {
        unmap_domain_page(guest_pt);
        return 1;
    }

    unmap_domain_page(guest_pt);
    return 0;
}

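/*
 * Writable-PTE prediction: remember which guest page table page probably
 * holds the PTE mapping a given gpfn, keyed in the shadow hash by
 * PGT_writable_pred entries whose saturating score is bumped on useful
 * predictions and halved on bad ones.
 */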
#define GPFN_TO_GPTEPAGE(_gpfn) ((_gpfn) / (PAGE_SIZE / sizeof(guest_l1_pgentry_t)))
static inline unsigned long
predict_writable_pte_page(struct domain *d, unsigned long gpfn)
{
    return __shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), PGT_writable_pred);
}

static inline void
increase_writable_pte_prediction(struct domain *d, unsigned long gpfn, unsigned long prediction)
{
    unsigned long score = prediction & PGT_score_mask;
    int create = (score == 0);

    // saturating addition
    score = (score + (1u << PGT_score_shift)) & PGT_score_mask;
    score = score ? score : PGT_score_mask;

    prediction = (prediction & PGT_mfn_mask) | score;

    //printk("increase gpfn=%lx pred=%lx create=%d\n", gpfn, prediction, create);
    set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);

    if ( create )
        perfc_incr(writable_pte_predictions);
}

static inline void
decrease_writable_pte_prediction(struct domain *d, unsigned long gpfn, unsigned long prediction)
{
    unsigned long score = prediction & PGT_score_mask;
    ASSERT(score);

    // divide score by 2...  We don't like bad predictions.
    //
    score = (score >> 1) & PGT_score_mask;

    prediction = (prediction & PGT_mfn_mask) | score;

    //printk("decrease gpfn=%lx pred=%lx score=%lx\n", gpfn, prediction, score);

    if ( score )
        set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);
    else
    {
        delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred);
        perfc_decr(writable_pte_predictions);
    }
}

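/*
 * Write-protect a single shadow L1 entry, fixing up shadow refcounts when
 * the containing table really is an L1 shadow.  Increments *found and
 * returns nonzero once max_refs_to_find entries have been fixed.
 */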
static int fix_entry(
    struct domain *d,
    l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
{
    l1_pgentry_t old = *pt;
    l1_pgentry_t new = old;

    l1e_remove_flags(new,_PAGE_RW);
    if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
        BUG();
    (*found)++;
    *pt = new;
    if ( is_l1_shadow )
        shadow_put_page_from_l1e(old, d);

    return (*found == max_refs_to_find);
}

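/*
 * Scan one shadow L1 page (and its extension page when SH_L1_HAS_NEXT_PAGE)
 * for writable mappings of @readonly_gmfn and write-protect them, fixing
 * at most @max_refs_to_find entries.  In external shadow mode the entry
 * index recorded in the guest page's type_info is tried first.  Returns
 * the number of entries fixed.
 */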
static u32 remove_all_write_access_in_ptpage(
    struct domain *d, unsigned long pt_pfn, unsigned long pt_mfn,
    unsigned long readonly_gpfn, unsigned long readonly_gmfn,
    u32 max_refs_to_find, unsigned long prediction)
{
    l1_pgentry_t *pt = map_domain_page(pt_mfn);
    l1_pgentry_t *pt_next = 0, *sl1e_p;
    l1_pgentry_t match;
    unsigned long flags = _PAGE_RW | _PAGE_PRESENT;
    int i;
    u32 found = 0;
    int is_l1_shadow =
        ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
         PGT_l1_shadow);
#if CONFIG_PAGING_LEVELS == 4
    is_l1_shadow |=
      ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
                PGT_fl1_shadow);
#endif

    if ( SH_L1_HAS_NEXT_PAGE )
        pt_next = map_domain_page(pt_mfn + 1);

    match = l1e_from_pfn(readonly_gmfn, flags);

    if ( shadow_mode_external(d) ) 
    {
        i = (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
            >> PGT_va_shift;

        if ( SH_L1_HAS_NEXT_PAGE &&
             i >= L1_PAGETABLE_ENTRIES )
            sl1e_p = &pt_next[i - L1_PAGETABLE_ENTRIES];
        else
            sl1e_p = &pt[i];

        if ( (i >= 0 && i < GUEST_L1_PAGETABLE_ENTRIES) &&
             !l1e_has_changed(*sl1e_p, match, flags) &&
             fix_entry(d, sl1e_p, &found, is_l1_shadow, max_refs_to_find) &&
             !prediction )
            goto out;
    }

    for ( i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++ )
    {
        if ( SH_L1_HAS_NEXT_PAGE &&
             i >= L1_PAGETABLE_ENTRIES )
            sl1e_p = &pt_next[i - L1_PAGETABLE_ENTRIES];
        else
            sl1e_p = &pt[i];

        if ( unlikely(!l1e_has_changed(*sl1e_p, match, flags)) &&
             fix_entry(d, sl1e_p, &found, is_l1_shadow, max_refs_to_find) )
            break;
    }

out:
    unmap_domain_page(pt);
    if ( SH_L1_HAS_NEXT_PAGE )
        unmap_domain_page(pt_next);

    return found;
}

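/*
 * Revoke every writable mapping of @readonly_gmfn.  Returns 1 if no
 * writable references could be outstanding; otherwise it write-protects
 * as many writable PTEs as it can find, first via the back-pointer hint
 * in external mode and then by scanning every L1 shadow in the hash
 * table, and returns 0.
 */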
static int remove_all_write_access(
    struct domain *d, unsigned long readonly_gpfn, unsigned long readonly_gmfn)
{
    int i;
    struct shadow_status *a;
    u32 found = 0, write_refs;
    unsigned long predicted_smfn;

    ASSERT(shadow_lock_is_acquired(d));
    ASSERT(VALID_MFN(readonly_gmfn));

    perfc_incrc(remove_write_access);

    // If it's not a writable page, then no writable refs can be outstanding.
    //
    if ( (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
         PGT_writable_page )
    {
        perfc_incrc(remove_write_not_writable);
        return 1;
    }

    // How many outstanding writable PTEs for this page are there?
    //
    write_refs =
        (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
    if ( write_refs && MFN_PINNED(readonly_gmfn) )
    {
        write_refs--;
    }

    if ( write_refs == 0 )
    {
        perfc_incrc(remove_write_no_work);
        return 1;
    }

    if ( shadow_mode_external(d) )
    {
        if ( --write_refs == 0 )
            return 0;

        // Use the back pointer to locate the shadow page that can contain
        // the PTE of interest.
        if ( (predicted_smfn = mfn_to_page(readonly_gmfn)->tlbflush_timestamp) )
        {
            found += remove_all_write_access_in_ptpage(
                d, predicted_smfn, predicted_smfn, readonly_gpfn,
                readonly_gmfn, write_refs, 0);
            if ( found == write_refs )
                return 0;
        }
    }

    // Search all the shadow L1 page tables...
    //
    for (i = 0; i < shadow_ht_buckets; i++)
    {
        a = &d->arch.shadow_ht[i];
        while ( a && a->gpfn_and_flags )
        {
            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow
#if CONFIG_PAGING_LEVELS >= 4
                 || (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow
#endif
               )
            {
                found += remove_all_write_access_in_ptpage(
                    d, a->gpfn_and_flags & PGT_mfn_mask, a->smfn,
                    readonly_gpfn, readonly_gmfn, write_refs - found,
                    a->gpfn_and_flags & PGT_mfn_mask);
                if ( found == write_refs )
                    return 0;
            }

            a = a->next;
        }
    }

    FSH_LOG("%s: looking for %d refs, found %d refs",
            __func__, write_refs, found);

    return 0;
}


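/*
 * Walk the out-of-sync list and, for every entry shadowed with page type
 * @stype, propagate the guest's changes (relative to its snapshot) into
 * the shadow.  Top-level shadows whose guest table has been emptied may
 * be unpinned.  Returns nonzero if the local TLB needs flushing.
 */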
static int resync_all(struct domain *d, u32 stype)
{
    struct out_of_sync_entry *entry;
    unsigned i;
    unsigned long smfn;
    void *guest, *shadow, *snapshot;
    int need_flush = 0, external = shadow_mode_external(d);
    int unshadow;
    int changed;
    u32 min_max_shadow, min_max_snapshot;
    int min_shadow, max_shadow, min_snapshot, max_snapshot;
    struct vcpu *v;

    ASSERT(shadow_lock_is_acquired(d));

    for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
    {
        int max = -1;

        if ( entry->snapshot_mfn == SHADOW_SNAPSHOT_ELSEWHERE )
            continue;

        smfn = __shadow_status(d, entry->gpfn, stype);

        if ( !smfn )
        {
            // For heavyweight shadows: no need to update refcounts if
            // there's no shadow page.
            //
            if ( shadow_mode_refcounts(d) )
                continue;

            // For lightweight shadows: we only need to resync the refcounts
            // against the new contents of the guest page if it has the right
            // page type.
            //
            if ( stype != ( mfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
                continue;
        }

        FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
                stype, entry->gpfn, entry->gmfn, smfn, entry->snapshot_mfn);

        // Compare guest's new contents to its snapshot, validating
        // and updating its shadow as appropriate.
        //
        guest    = map_domain_page(entry->gmfn);
        snapshot = map_domain_page(entry->snapshot_mfn);

        if ( smfn )
            shadow = map_domain_page(smfn);
        else
            shadow = NULL;

        unshadow = 0;

        min_max_shadow = mfn_to_page(smfn)->tlbflush_timestamp;
        min_shadow     = SHADOW_MIN(min_max_shadow);
        max_shadow     = SHADOW_MAX(min_max_shadow);

        min_max_snapshot= mfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
        min_snapshot    = SHADOW_MIN(min_max_snapshot);
        max_snapshot    = SHADOW_MAX(min_max_snapshot);

        switch ( stype )
        {
        case PGT_l1_shadow:
        {
            guest_l1_pgentry_t *guest1 = guest;
            l1_pgentry_t *shadow1 = shadow;
            l1_pgentry_t *shadow1_next = 0, *sl1e_p;
            guest_l1_pgentry_t *snapshot1 = snapshot;
            int unshadow_l1 = 0;

            ASSERT(shadow_mode_write_l1(d) ||
                   shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));

            if ( !shadow_mode_refcounts(d) )
                revalidate_l1(d, (l1_pgentry_t *)guest1, (l1_pgentry_t *)snapshot1);
            if ( !smfn )
                break;

            changed = 0;

            if ( SH_L1_HAS_NEXT_PAGE && shadow1 )
                shadow1_next = map_domain_page(smfn + 1);

            for ( i = min_shadow; i <= max_shadow; i++ )
            {

                if ( SH_L1_HAS_NEXT_PAGE && i >= L1_PAGETABLE_ENTRIES )
                    sl1e_p = &shadow1_next[i - L1_PAGETABLE_ENTRIES];
                else
                    sl1e_p = &shadow1[i];

                if ( (i < min_snapshot) || (i > max_snapshot) ||
                     guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                {
                    int error;

                    error = validate_pte_change(d, guest1[i], sl1e_p);
                    if ( error ==  -1 )
                        unshadow_l1 = 1;
                    else {
                        need_flush |= error;
                        set_guest_back_ptr(d, *sl1e_p, smfn, i);
                    }
                    // can't update snapshots of linear page tables -- they
                    // are used multiple times...
                    //
                    // snapshot[i] = new_pte;

                    changed++;
                }
            }

            if ( shadow1_next )
                unmap_domain_page(shadow1_next);

            perfc_incrc(resync_l1);
            perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
            perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
            if ( d->arch.ops->guest_paging_levels >= PAGING_L3 &&
                 unshadow_l1 ) {
                pgentry_64_t l2e = { 0 };

                __shadow_get_l2e(entry->v, entry->va, &l2e);

                if ( entry_get_flags(l2e) & _PAGE_PRESENT ) {
                    put_shadow_ref(entry_get_pfn(l2e));
                    l2e = entry_empty();
                    __shadow_set_l2e(entry->v, entry->va, &l2e);

                    if (entry->v == current)
                        need_flush = 1;
                }
            }

            break;
        }
#if CONFIG_PAGING_LEVELS == 2
        case PGT_l2_shadow:
        {
            l2_pgentry_t *guest2 = guest;
            l2_pgentry_t *shadow2 = shadow;
            l2_pgentry_t *snapshot2 = snapshot;

            ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
            BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented

            changed = 0;
            for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
            {
                if ( !is_guest_l2_slot(0,i) && !external )
                    continue;

                l2_pgentry_t new_pde = guest2[i];
                if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK))
                {
                    need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);

                    // can't update snapshots of linear page tables -- they
                    // are used multiple times...
                    //
                    // snapshot[i] = new_pde;

                    changed++;
                }
                if ( l2e_get_intpte(new_pde) != 0 ) /* FIXME: check flags? */
                    max = i;

                // XXX - This hack works for linux guests.
                //       Need a better solution long term.
                if ( !(l2e_get_flags(new_pde) & _PAGE_PRESENT) &&
                     unlikely(l2e_get_intpte(new_pde) != 0) &&
                     !unshadow && MFN_PINNED(smfn) )
                    unshadow = 1;
            }
            if ( max == -1 )
                unshadow = 1;
            perfc_incrc(resync_l2);
            perfc_incr_histo(shm_l2_updates, changed, PT_UPDATES);
            break;
        }
        case PGT_hl2_shadow:
        {
            l2_pgentry_t *guest2 = guest;
            l2_pgentry_t *snapshot2 = snapshot;
            l1_pgentry_t *shadow2 = shadow;

            ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
            BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented

            changed = 0;
            for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
            {
                if ( !is_guest_l2_slot(0, i) && !external )
                    continue;

                l2_pgentry_t new_pde = guest2[i];
                if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK) )
                {
                    need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);

                    // can't update snapshots of linear page tables -- they
                    // are used multiple times...
                    //
                    // snapshot[i] = new_pde;

                    changed++;
                }
            }
            perfc_incrc(resync_hl2);
            perfc_incr_histo(shm_hl2_updates, changed, PT_UPDATES);
            break;
        }
#elif CONFIG_PAGING_LEVELS >= 3
        case PGT_l2_shadow:
        case PGT_l3_shadow:
        {
            pgentry_64_t *guest_pt = guest;
            pgentry_64_t *shadow_pt = shadow;
            pgentry_64_t *snapshot_pt = snapshot;

            changed = 0;
            for ( i = min_shadow; i <= max_shadow; i++ )
            {
                if ( (i < min_snapshot) || (i > max_snapshot) ||
                  entry_has_changed(
                      guest_pt[i], snapshot_pt[i], PAGE_FLAG_MASK) )
                {
                    need_flush |= validate_entry_change(
                        d, &guest_pt[i], &shadow_pt[i],
                        shadow_type_to_level(stype));
                    changed++;
                }
#if CONFIG_PAGING_LEVELS == 3
                if ( stype == PGT_l3_shadow ) 
                {
                    if ( entry_get_value(guest_pt[i]) != 0 ) 
                        max = i;

                    if ( !(entry_get_flags(guest_pt[i]) & _PAGE_PRESENT) &&
                         unlikely(entry_get_value(guest_pt[i]) != 0) &&
                         !unshadow &&
                         (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
                        unshadow = 1;
                }
#endif
            }

            if ( d->arch.ops->guest_paging_levels == PAGING_L3
                 && max == -1 && stype == PGT_l3_shadow )
                unshadow = 1;

            perfc_incrc(resync_l3);
            perfc_incr_histo(shm_l3_updates, changed, PT_UPDATES);
            break;
        }
        case PGT_l4_shadow:
        {
            guest_root_pgentry_t *guest_root = guest;
            guest_root_pgentry_t *snapshot_root = snapshot;

            changed = 0;
            for ( i = 0; i < GUEST_ROOT_PAGETABLE_ENTRIES; i++ )
            {
                guest_root_pgentry_t new_root_e = guest_root[i];
                if ( !is_guest_l4_slot(i) && !external )
                    continue;
                if ( root_entry_has_changed(
                        new_root_e, snapshot_root[i], PAGE_FLAG_MASK))
                {
#ifndef GUEST_PGENTRY_32
                    l4_pgentry_t *shadow4 = shadow;

                    if ( d->arch.ops->guest_paging_levels == PAGING_L4 ) 
                    {
                        need_flush |= validate_entry_change(
                          d, (pgentry_64_t *)&new_root_e,
                          (pgentry_64_t *)&shadow4[i], shadow_type_to_level(stype));
                    }
                    else
#endif
                    {
                        validate_bl2e_change(d, &new_root_e, shadow, i);
                    }
                    changed++;
                    ESH_LOG("%d: shadow4 mfn: %lx, shadow root: %lx\n", i,
                      smfn, pagetable_get_paddr(current->arch.shadow_table));
                }
                if ( guest_root_get_intpte(new_root_e) != 0 ) /* FIXME: check flags? */
                    max = i;

                // XXX - This hack works for linux guests.
                //       Need a better solution in the long term.
                if ( !(guest_root_get_flags(new_root_e) & _PAGE_PRESENT) &&
                     unlikely(guest_root_get_intpte(new_root_e) != 0) &&
                     !unshadow &&
                     (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
                    unshadow = 1;
            }
            if ( max == -1 )
                unshadow = 1;
            perfc_incrc(resync_l4);
            perfc_incr_histo(shm_l4_updates, changed, PT_UPDATES);
            break;
        }

#endif /* CONFIG_PAGING_LEVELS >= 3 */
        default:
            BUG();
        }

        if ( smfn )
            unmap_domain_page(shadow);
        unmap_domain_page(snapshot);
        unmap_domain_page(guest);

        if ( unlikely(unshadow) )
        {
            for_each_vcpu(d, v)
                if ( smfn == pagetable_get_pfn(v->arch.shadow_table) )
                    return need_flush;
            perfc_incrc(unshadow_l2_count);
            shadow_unpin(smfn);
#if CONFIG_PAGING_LEVELS == 2
            if ( unlikely(shadow_mode_external(d)) )
            {
                unsigned long hl2mfn;

                if ( (hl2mfn = __shadow_status(d, entry->gpfn, PGT_hl2_shadow)) &&
                     MFN_PINNED(hl2mfn) )
                    shadow_unpin(hl2mfn);
            }
#endif
        }
    }

    return need_flush;
}

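/*
 * Bring every out-of-sync page back in sync: first revoke the writable
 * mappings recorded in writable_pl1e, flush the other VCPUs' TLBs, then
 * resync each level of shadow page table and free the out-of-sync state.
 */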
static void sync_all(struct domain *d)
{
    struct out_of_sync_entry *entry;
    int need_flush = 0;
    l1_pgentry_t *ppte, opte, npte;
    cpumask_t other_vcpus_mask;

    perfc_incrc(shadow_sync_all);

    ASSERT(shadow_lock_is_acquired(d));

    // First, remove all write permissions to the page tables
    //
    for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
    {
        // Skip entries that have low bits set...  Those aren't
        // real PTEs.
        //
        if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
            continue;

        ppte = (l1_pgentry_t *)(
            (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
            (entry->writable_pl1e & ~PAGE_MASK));
        opte = npte = *ppte;
        l1e_remove_flags(npte, _PAGE_RW);

        if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
             !shadow_get_page_from_l1e(npte, d) )
            BUG();
        *ppte = npte;
        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT,
                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
        shadow_put_page_from_l1e(opte, d);

        unmap_domain_page(ppte);
    }

    /* Other VCPUs mustn't use the revoked writable mappings. */
    other_vcpus_mask = d->domain_dirty_cpumask;
    cpu_clear(smp_processor_id(), other_vcpus_mask);
    flush_tlb_mask(other_vcpus_mask);

    /* Flush ourself later. */
    need_flush = 1;

    /* Second, resync all L1 pages, then L2 pages, etc... */
    need_flush |= resync_all(d, PGT_l1_shadow);

#if CONFIG_PAGING_LEVELS == 2
    if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
         shadow_mode_translate(d) )  
    {
        need_flush |= resync_all(d, PGT_hl2_shadow);
    }
#endif

#if CONFIG_PAGING_LEVELS >= 3
    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
        need_flush |= resync_all(d, PGT_l4_shadow);
    else
        need_flush |= resync_all(d, PGT_l2_shadow);

    if ( d->arch.ops->guest_paging_levels >= PAGING_L3 )
    {
        need_flush |= resync_all(d, PGT_l3_shadow);
        need_flush |= resync_all(d, PGT_l4_shadow);
    }
#endif

    if ( need_flush && !unlikely(shadow_mode_external(d)) )
        local_flush_tlb();

    free_out_of_sync_state(d);
}

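/*
 * Build the shadow PTE for a guest write fault: set _PAGE_DIRTY and
 * _PAGE_ACCESSED in the guest PTE, log the write for log-dirty mode, and
 * mark page table pages out of sync for @va.  Returns 0 if the guest
 * frame is invalid, 1 otherwise.
 */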
static inline int l1pte_write_fault(
    struct vcpu *v, guest_l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
    unsigned long va)
{
    struct domain *d = v->domain;
    guest_l1_pgentry_t gpte = *gpte_p;
    l1_pgentry_t spte;
    unsigned long gpfn = l1e_get_pfn(gpte);
    unsigned long gmfn = gmfn_to_mfn(d, gpfn);

    //printk("l1pte_write_fault gmfn=%lx\n", gmfn);

    if ( unlikely(!VALID_MFN(gmfn)) )
    {
        SH_VLOG("l1pte_write_fault: invalid gpfn=%lx", gpfn);
        *spte_p = l1e_empty();
        return 0;
    }

    ASSERT(guest_l1e_get_flags(gpte) & _PAGE_RW);
    guest_l1e_add_flags(gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
    spte = l1e_from_pfn(gmfn, guest_l1e_get_flags(gpte) & ~_PAGE_GLOBAL);

    SH_VVLOG("l1pte_write_fault: updating spte=0x%" PRIpte " gpte=0x%" PRIpte,
             l1e_get_intpte(spte), l1e_get_intpte(gpte));

    __mark_dirty(d, gmfn);

    if ( mfn_is_page_table(gmfn) )
        shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);

    *gpte_p = gpte;
    *spte_p = spte;

    return 1;
}

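/*
 * Build the shadow PTE for a guest read fault: set _PAGE_ACCESSED in the
 * guest PTE, and leave the shadow read-only when the guest PTE is not yet
 * dirty, the frame is a page table page, or the domain is in log-dirty
 * mode.  Returns 0 if the guest frame is invalid, 1 otherwise.
 */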
static inline int l1pte_read_fault(
    struct domain *d, guest_l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
{
    guest_l1_pgentry_t gpte = *gpte_p;
    l1_pgentry_t spte = *spte_p;
    unsigned long pfn = l1e_get_pfn(gpte);
    unsigned long mfn = gmfn_to_mfn(d, pfn);

    if ( unlikely(!VALID_MFN(mfn)) )
    {
        SH_VLOG("l1pte_read_fault: invalid gpfn=%lx", pfn);
        *spte_p = l1e_empty();
        return 0;
    }

    guest_l1e_add_flags(gpte, _PAGE_ACCESSED);
    spte = l1e_from_pfn(mfn, guest_l1e_get_flags(gpte) & ~_PAGE_GLOBAL);

    if ( shadow_mode_log_dirty(d) || !(guest_l1e_get_flags(gpte) & _PAGE_DIRTY) ||
         mfn_is_page_table(mfn) )
    {
        l1e_remove_flags(spte, _PAGE_RW);
    }

    SH_VVLOG("l1pte_read_fault: updating spte=0x%" PRIpte " gpte=0x%" PRIpte,
             l1e_get_intpte(spte), l1e_get_intpte(gpte));
    *gpte_p = gpte;
    *spte_p = spte;

    return 1;
}
#if CONFIG_PAGING_LEVELS == 2
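/*
 * 2-level shadow page fault handler: sync the shadow state for @va,
 * validate the guest PDE and PTE, emulate accessed/dirty bit updates in
 * the guest PTE, and install the resulting shadow PTE.  Returns
 * EXCRET_fault_fixed if the fault was fixed here, 0 otherwise.
 */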
static int shadow_fault_32(unsigned long va, struct cpu_user_regs *regs)
{
    l1_pgentry_t gpte, spte, orig_gpte;
    struct vcpu *v = current;
    struct domain *d = v->domain;
    l2_pgentry_t gpde;

    spte = l1e_empty();

    SH_VVLOG("shadow_fault( va=%lx, code=%lu )",
             va, (unsigned long)regs->error_code);
    perfc_incrc(shadow_fault_calls);

    check_pagetable(v, "pre-sf");

    /*
     * Don't let someone else take the guest's table pages out-of-sync.
     */
    shadow_lock(d);

    /*
     * STEP 1. Bring any out-of-sync shadow state covering this virtual
     *         address back in sync, so that the guest PDE/PTE checks
     *         below see the guest's current page table contents.
     */
    __shadow_sync_va(v, va);

    /*
     * STEP 2. Check the guest PTE.
     */
    __guest_get_l2e(v, va, &gpde);
    if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
    {
        SH_VVLOG("shadow_fault - EXIT: L1 not present");
        perfc_incrc(shadow_fault_bail_pde_not_present);
        goto fail;
    }

    // This can't fault because we hold the shadow lock and we've ensured that
    // the mapping is in-sync, so the check of the PDE's present bit, above,
    // covers this access.
    //
    //orig_gpte = gpte = linear_pg_table[l1_linear_offset(va)];
    __guest_get_l1e(v, va, &gpte);
    orig_gpte = gpte;

    if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_PRESENT)) )
    {
        SH_VVLOG("shadow_fault - EXIT: gpte not present (%" PRIpte ")",
                 l1e_get_intpte(gpte));
        perfc_incrc(shadow_fault_bail_pte_not_present);
        goto fail;
    }

    /* Write fault? */
    if ( regs->error_code & 2 )
    {
        int allow_writes = 0;

        if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_RW)) )
        {
            if ( shadow_mode_page_writable(va, regs, l1e_get_pfn(gpte)) )
            {
                allow_writes = 1;
                l1e_add_flags(gpte, _PAGE_RW);
            }
            else
            {
                /* Write fault on a read-only mapping. */
                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")",
                         l1e_get_intpte(gpte));
                perfc_incrc(shadow_fault_bail_ro_mapping);
                goto fail;
            }
        }
        else if ( unlikely(!shadow_mode_wr_pt_pte(d) && mfn_is_page_table(l1e_get_pfn(gpte))) )
        {
            SH_LOG("l1pte_write_fault: no write access to page table page");
            domain_crash_synchronous();
        }

        if ( unlikely(!l1pte_write_fault(v, &gpte, &spte, va)) )
        {
            SH_VVLOG("shadow_fault - EXIT: l1pte_write_fault failed");
            perfc_incrc(write_fault_bail);
            shadow_unlock(d);
            return 0;
        }

        if ( allow_writes )
            l1e_remove_flags(gpte, _PAGE_RW);
    }
    else
    {
        if ( !l1pte_read_fault(d, &gpte, &spte) )
        {
            SH_VVLOG("shadow_fault - EXIT: l1pte_read_fault failed");
            perfc_incrc(read_fault_bail);
            shadow_unlock(d);
            return 0;
        }
    }

    /*
     * STEP 3. Write the modified shadow PTE and guest PTE back to the tables.
     */
    if ( l1e_has_changed(orig_gpte, gpte, PAGE_FLAG_MASK) )
    {
        /* XXX Watch out for read-only L2 entries! (not used in Linux). */
        /*if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
                                     &gpte, sizeof(gpte))) )*/
        if ( unlikely(!__guest_set_l1e(v, va, &gpte)))
        {
            printk("%s() failed, crashing domain %d "
                   "due to a read-only L2 page table (gpde=%" PRIpte "), va=%lx\n",
                   __func__,d->domain_id, l2e_get_intpte(gpde), va);
            domain_crash_synchronous();
        }

        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gpde)));
    }

    shadow_set_l1e(va, spte, 1);

    perfc_incrc(shadow_fault_fixed);
    d->arch.shadow_fault_count++;

    shadow_unlock(d);

    check_pagetable(v, "post-sf");
    return EXCRET_fault_fixed;

fail:
    shadow_unlock(d);
    return 0;
}
#endif /* CONFIG_PAGING_LEVELS == 2 */

static inline unsigned long va_to_l1mfn(struct vcpu *v, unsigned long va)
{
    struct domain *d = v->domain;
    guest_l2_pgentry_t gl2e = {0};

    __guest_get_l2e(v, va, &gl2e);
    
    if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PRESENT)) )
        return INVALID_MFN;

    return gmfn_to_mfn(d, l2e_get_pfn(gl2e));
}

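/*
 * Shadow-side handling of a guest va-mapping update: sync the translation
 * for @va, propagate @val into the shadow L1, and record the update for
 * log-dirty mode.
 */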
static int do_update_va_mapping(unsigned long va,
                                l1_pgentry_t val,
                                struct vcpu *v)
{
    struct domain *d = v->domain;
    l1_pgentry_t spte;
    int rc = 0;

    shadow_lock(d);

    // This is actually overkill - we don't need to sync the L1 itself,
    // just everything involved in getting to this L1 (i.e. we need
    // linear_pg_table[l1_linear_offset(va)] to be in sync)...
    //
    __shadow_sync_va(v, va);

    l1pte_propagate_from_guest(d, *(guest_l1_pgentry_t *)&val, &spte);
#if CONFIG_PAGING_LEVELS == 2
    shadow_set_l1e(va, spte, 0);
#elif CONFIG_PAGING_LEVELS >= 3
    shadow_set_l1e_64(va, (pgentry_64_t *) &spte, 0);
#endif
    /*
     * If we're in log-dirty mode then we need to note that we've updated
     * the PTE in the PT-holding page. We need the machine frame number
     * for this.
     */
    __mark_dirty(d, va_to_l1mfn(v, va));

    shadow_unlock(d);

    return rc;
}


/*
 * What lives where in the 32-bit address space in the various shadow modes,
 * and what it uses to get/maintain that mapping.
 *
 * SHADOW MODE:      none         enable         translate         external
 *
 * 4KB things:
 * guest_vtable    lin_l2     mapped per gl2   lin_l2 via hl2   mapped per gl2
 * shadow_vtable     n/a         sh_lin_l2       sh_lin_l2      mapped per gl2
 * hl2_vtable        n/a            n/a        lin_hl2 via hl2  mapped per gl2
 * monitor_vtable    n/a            n/a             n/a           mapped once
 *
 * 4MB things:
 * guest_linear  lin via gl2    lin via gl2      lin via hl2      lin via hl2
 * shadow_linear     n/a      sh_lin via sl2   sh_lin via sl2   sh_lin via sl2
 * monitor_linear    n/a            n/a             n/a              ???
 * perdomain      perdomain      perdomain       perdomain        perdomain
 * R/O M2P         R/O M2P        R/O M2P           n/a              n/a
 * R/W M2P         R/W M2P        R/W M2P         R/W M2P          R/W M2P
 * P2M               n/a            n/a           R/O M2P          R/O M2P
 *
 * NB:
 * update_pagetables(), shadow_update_pagetables(), shadow_mode_enable(),
 * shadow_l2_table(), shadow_hl2_table(), and alloc_monitor_pagetable()
 * all play a part in maintaining these mappings.
 */
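/*
 * Refresh the per-VCPU shadow state for the current guest root: remap
 * guest_vtable, find or build the top-level shadow and switch
 * arch.shadow_table to it (dropping the old reference), remap
 * shadow_vtable/hl2_vtable as the mode requires, fix up the monitor
 * table's linear-map slots, and finally sync if the shadow already
 * existed.
 */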
static void shadow_update_pagetables(struct vcpu *v)
{
    struct domain *d = v->domain;
#if CONFIG_PAGING_LEVELS == 4
    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)?
                          pagetable_get_pfn(v->arch.guest_table) :
                          pagetable_get_pfn(v->arch.guest_table_user));
#else
    unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
#endif

    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
    unsigned long smfn, old_smfn;

#if CONFIG_PAGING_LEVELS == 2
    unsigned long hl2mfn;
#endif
    int need_sync = 0;

    int max_mode = ( shadow_mode_external(d) ? SHM_external
                     : shadow_mode_translate(d) ? SHM_translate
                     : shadow_mode_enabled(d) ? SHM_enable
                     : 0 );

    ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );
    ASSERT( max_mode );

    /*
     *  arch.guest_vtable
     */
    if ( max_mode & (SHM_enable | SHM_external) )
    {
        if ( likely(v->arch.guest_vtable != NULL) )
            unmap_domain_page_global(v->arch.guest_vtable);
        v->arch.guest_vtable = map_domain_page_global(gmfn);
    }

    /*
     *  arch.shadow_table
     */
    if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_base_page_table))) ) 
    {
#if CONFIG_PAGING_LEVELS == 2
        smfn = shadow_l2_table(d, gpfn, gmfn);
#elif CONFIG_PAGING_LEVELS == 3
        smfn = shadow_l3_table(d, gpfn, gmfn);
#elif CONFIG_PAGING_LEVELS == 4
        smfn = shadow_l4_table(d, gpfn, gmfn);
#endif
    }
    else
    {
        /*
         * Move the sync until later, to avoid this smfn occasionally
         * being unshadowed in the meantime.
         */
        need_sync = 1;
    }


    if ( !get_shadow_ref(smfn) )
        BUG();
    old_smfn = pagetable_get_pfn(v->arch.shadow_table);
    v->arch.shadow_table = mk_pagetable((u64)smfn << PAGE_SHIFT);
    if ( old_smfn )
        put_shadow_ref(old_smfn);

    SH_VVLOG("shadow_update_pagetables(gmfn=%lx, smfn=%lx)", gmfn, smfn);

    /*
     * arch.shadow_vtable
     */
    if ( max_mode == SHM_external
#if CONFIG_PAGING_LEVELS >=3
         || max_mode & SHM_enable
#endif
        )
    {
        if ( v->arch.shadow_vtable )
            unmap_domain_page_global(v->arch.shadow_vtable);
        v->arch.shadow_vtable = map_domain_page_global(smfn);
    }

#if CONFIG_PAGING_LEVELS == 2
    /*
     * arch.hl2_vtable
     */

    // if max_mode == SHM_translate, then the hl2 is already installed
    // correctly in its smfn, and there's nothing to do.
    //
    if ( max_mode == SHM_external )
    {
        if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
            hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
        if ( v->arch.hl2_vtable )
            unmap_domain_page_global(v->arch.hl2_vtable);
        v->arch.hl2_vtable = map_domain_page_global(hl2mfn);
    }

    /*
     * fixup pointers in monitor table, as necessary
     */
    if ( max_mode == SHM_external )
    {
        l2_pgentry_t *mpl2e = v->arch.monitor_vtable;
        l2_pgentry_t old_hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
        l2_pgentry_t old_sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];

        ASSERT( shadow_mode_translate(d) );

        if ( !get_shadow_ref(hl2mfn) )
            BUG();
        mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
            l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
        if ( l2e_get_flags(old_hl2e) & _PAGE_PRESENT )
            put_shadow_ref(l2e_get_pfn(old_hl2e));

        if ( !get_shadow_ref(smfn) )
            BUG();
        mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
            l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
        if ( l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
            put_shadow_ref(l2e_get_pfn(old_sl2e));

        // XXX - maybe this can be optimized somewhat??
        local_flush_tlb();
    }
#endif /* CONFIG_PAGING_LEVELS == 2 */

#if CONFIG_PAGING_LEVELS == 3
    /*
     * fixup pointers in monitor table, as necessary
     */
    if ( max_mode == SHM_external )
    {
        l3_pgentry_t *mpl3e = (l3_pgentry_t *) v->arch.monitor_vtable;
        l2_pgentry_t *spl2e;
        unsigned long s2mfn;
        int i;
 
        ASSERT( shadow_mode_translate(d) );
        s2mfn = l3e_get_pfn(mpl3e[L3_PAGETABLE_ENTRIES - 1]);
 
        ASSERT( s2mfn);
        spl2e = map_domain_page(s2mfn);
 
        for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
            spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START) + i] =
                (l3e_get_flags(mpl3e[i]) & _PAGE_PRESENT) ?
                l2e_from_pfn(l3e_get_pfn(mpl3e[i]), __PAGE_HYPERVISOR) :
                l2e_empty();
 
        unmap_domain_page(spl2e);
        local_flush_tlb();
    }
#endif

    if ( likely(need_sync) )
        shadow_sync_all(d);
}


/************************************************************************/
/************************************************************************/
/************************************************************************/

#if 0 // this code has not been updated for 32pae & 64 bit modes
#if SHADOW_DEBUG

// The following is entirely for _check_pagetable()'s benefit.
// _check_pagetable() wants to know whether a given entry in a
// shadow page table is supposed to be the shadow of the guest's
// current entry, or the shadow of the entry held in the snapshot
// taken above.
//
// Here, we mark all currently existing entries as reflecting
// the snapshot, above.  All other places in xen that update
// the shadow will keep the shadow in sync with the guest's
// entries (via l1pte_propagate_from_guest and friends), which clear
// the SHADOW_REFLECTS_SNAPSHOT bit.
//
static void
mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn)
{
    unsigned long smfn;
    l1_pgentry_t *l1e;
    l2_pgentry_t *l2e;
    unsigned i;

    if ( (smfn = __shadow_status(d, gpfn, PGT_l1_shadow)) )
    {
        l1e = map_domain_page(smfn);
        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
            if ( is_guest_l1_slot(i) &&
                 (l1e_get_flags(l1e[i]) & _PAGE_PRESENT) )
                l1e_add_flags(l1e[i], SHADOW_REFLECTS_SNAPSHOT);
        unmap_domain_page(l1e);
    }

    if ( (smfn = __shadow_status(d, gpfn, PGT_l2_shadow)) )
    {
        l2e = map_domain_page(smfn);
        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
            if ( is_guest_l2_slot(0, i) &&
                 (l2e_get_flags(l2e[i]) & _PAGE_PRESENT) )
                l2e_add_flags(l2e[i], SHADOW_REFLECTS_SNAPSHOT);
        unmap_domain_page(l2e);
    }
}

// BUG: these are not SMP safe...
static int sh_l2_present;
static int sh_l1_present;
static char *sh_check_name;
// int shadow_status_noswap; // declared in shadow32.c

#define v2m(_v, _adr) ({                                                     \
    unsigned long _a  = (unsigned long)(_adr);                               \
    l2_pgentry_t _pde = shadow_linear_l2_table(_v)[l2_table_offset(_a)];     \
    unsigned long _pa = -1;                                                  \
    if ( l2e_get_flags(_pde) & _PAGE_PRESENT )                               \
    {                                                                        \
        l1_pgentry_t _pte;                                                   \
        _pte = shadow_linear_pg_table[l1_linear_offset(_a)];                 \
        if ( l1e_get_flags(_pte) & _PAGE_PRESENT )                           \
            _pa = l1e_get_paddr(_pte);                                       \
    }                                                                        \
    _pa | (_a & ~PAGE_MASK);                                                 \
})

#define FAIL(_f, _a...)                                                      \
    do {                                                                     \
        printk("XXX %s-FAIL (%d,%d,%d) " _f " at %s(%d)\n",                  \
               sh_check_name, level, l2_idx, l1_idx, ## _a,                  \
               __FILE__, __LINE__);                                          \
        printk("guest_pte=%" PRIpte " eff_guest_pte=%" PRIpte                \
               " shadow_pte=%" PRIpte " snapshot_pte=%" PRIpte               \
               " &guest=%p &shadow=%p &snap=%p v2m(&guest)=%p"               \
               " v2m(&shadow)=%p v2m(&snap)=%p ea=%08x\n",                   \
               l1e_get_intpte(guest_pte), l1e_get_intpte(eff_guest_pte),     \
               l1e_get_intpte(shadow_pte), l1e_get_intpte(snapshot_pte),     \
               p_guest_pte, p_shadow_pte, p_snapshot_pte,                    \
               (void *)v2m(v, p_guest_pte), (void *)v2m(v, p_shadow_pte),    \
               (void *)v2m(v, p_snapshot_pte),                               \
               (l2_idx << L2_PAGETABLE_SHIFT) |                              \
               (l1_idx << L1_PAGETABLE_SHIFT));                              \
        errors++;                                                            \
    } while ( 0 )

static int check_pte(
    struct vcpu *v,
    l1_pgentry_t *p_guest_pte,
    l1_pgentry_t *p_shadow_pte,
    l1_pgentry_t *p_snapshot_pte,
    int level, int l2_idx, int l1_idx)
{
    struct domain *d = v->domain;
    l1_pgentry_t guest_pte = *p_guest_pte;
    l1_pgentry_t shadow_pte = *p_shadow_pte;
    l1_pgentry_t snapshot_pte = p_snapshot_pte ? *p_snapshot_pte : l1e_empty();
    l1_pgentry_t eff_guest_pte;
    unsigned long mask, eff_guest_pfn, eff_guest_mfn, shadow_mfn;
    int errors = 0, guest_writable;
    int page_table_page;

    if ( (l1e_get_intpte(shadow_pte) == 0) ||
         (l1e_get_intpte(shadow_pte) == 0xdeadface) ||
         (l1e_get_intpte(shadow_pte) == 0x00000E00) )
        return errors;  /* always safe */

    if ( !(l1e_get_flags(shadow_pte) & _PAGE_PRESENT) )
        FAIL("Non zero not present shadow_pte");

    if ( level == 2 ) sh_l2_present++;
    if ( level == 1 ) sh_l1_present++;

    if ( (l1e_get_flags(shadow_pte) & SHADOW_REFLECTS_SNAPSHOT) && p_snapshot_pte )
        eff_guest_pte = snapshot_pte;
    else
        eff_guest_pte = guest_pte;

    if ( !(l1e_get_flags(eff_guest_pte) & _PAGE_PRESENT) )
        FAIL("Guest not present yet shadow is");

    mask = ~(_PAGE_GLOBAL|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|_PAGE_AVAIL|PAGE_MASK);

    if ( ((l1e_get_intpte(shadow_pte) & mask) != (l1e_get_intpte(eff_guest_pte) & mask)) )
        FAIL("Corrupt?");

    if ( (level == 1) &&
         (l1e_get_flags(shadow_pte) & _PAGE_DIRTY) &&
         !(l1e_get_flags(eff_guest_pte) & _PAGE_DIRTY) )
        FAIL("Dirty coherence");

    if ( (l1e_get_flags(shadow_pte) & _PAGE_ACCESSED) &&
         !(l1e_get_flags(eff_guest_pte) & _PAGE_ACCESSED) )
        FAIL("Accessed coherence");

    if ( l1e_get_flags(shadow_pte) & _PAGE_GLOBAL )
        FAIL("global bit set in shadow");

    eff_guest_pfn = l1e_get_pfn(eff_guest_pte);
    eff_guest_mfn = gmfn_to_mfn(d, eff_guest_pfn);
    shadow_mfn = l1e_get_pfn(shadow_pte);

    if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
        FAIL("%s: invalid eff_guest_pfn=%lx eff_guest_pte=%" PRIpte "\n",
             __func__, eff_guest_pfn, l1e_get_intpte(eff_guest_pte));

    page_table_page = mfn_is_page_table(eff_guest_mfn);

    guest_writable =
        (l1e_get_flags(eff_guest_pte) & _PAGE_RW) ||
        (shadow_mode_write_l1(d) && (level == 1) && mfn_out_of_sync(eff_guest_mfn));

    if ( (l1e_get_flags(shadow_pte) & _PAGE_RW ) && !guest_writable )
    {
        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
               eff_guest_pfn, eff_guest_mfn, shadow_mfn,
               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
               page_table_page);
        FAIL("RW coherence");
    }

    if ( (level == 1) &&
         (l1e_get_flags(shadow_pte) & _PAGE_RW ) &&
         !(guest_writable && (l1e_get_flags(eff_guest_pte) & _PAGE_DIRTY)) )
    {
        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
               eff_guest_pfn, eff_guest_mfn, shadow_mfn,
               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
               page_table_page);
        FAIL("RW2 coherence");
    }

    if ( eff_guest_mfn == shadow_mfn )
    {
        if ( level > 1 )
            FAIL("Linear map ???");    /* XXX this will fail on BSD */
    }
    else
    {
        if ( level < 2 )
            FAIL("Shadow in L1 entry?");

        if ( level == 2 )
        {
            if ( __shadow_status(d, eff_guest_pfn, PGT_l1_shadow) != shadow_mfn )
                FAIL("shadow_mfn problem eff_guest_pfn=%lx shadow_mfn=%lx", eff_guest_pfn,
                     __shadow_status(d, eff_guest_pfn, PGT_l1_shadow));
        }
        else
            BUG(); // XXX -- not handled yet.
    }

    return errors;
}
#undef FAIL
#undef v2m

static int check_l1_table(
    struct vcpu *v, unsigned long gpfn,
    unsigned long gmfn, unsigned long smfn, unsigned l2_idx)
{
    struct domain *d = v->domain;
    int i;
    unsigned long snapshot_mfn;
    l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
    int errors = 0;

    if ( page_out_of_sync(mfn_to_page(gmfn)) )
    {
        snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
        ASSERT(snapshot_mfn);
        p_snapshot = map_domain_page(snapshot_mfn);
    }

    p_guest  = map_domain_page(gmfn);
    p_shadow = map_domain_page(smfn);

    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        errors += check_pte(v, p_guest+i, p_shadow+i,
                            p_snapshot ? p_snapshot+i : NULL,
                            1, l2_idx, i);

    unmap_domain_page(p_shadow);
    unmap_domain_page(p_guest);
    if ( p_snapshot )
        unmap_domain_page(p_snapshot);

    return errors;
}

#define FAILPT(_f, _a...)                                         \
    do {                                                          \
        printk("XXX FAIL %s-PT " _f "\n", sh_check_name, ## _a ); \
        errors++;                                                 \
    } while ( 0 )

static int check_l2_table(
    struct vcpu *v, unsigned long gmfn, unsigned long smfn, int oos_pdes)
{
    struct domain *d = v->domain;
    l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_page(gmfn);
    l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_page(smfn);
    l2_pgentry_t match;
    int i;
    int errors = 0;
    int limit;

    if ( !oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != d) )
        FAILPT("domain doesn't own page");
    if ( oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != NULL) )
        FAILPT("bogus owner for snapshot page");
    if ( page_get_owner(mfn_to_page(smfn)) != NULL )
        FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
               smfn, page_get_owner(mfn_to_page(smfn))->domain_id);

#if 0
    if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
                 DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
    {
        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
              i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
              i++ )
            printk("+++ (%d) %lx %lx\n",i,
                   l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]));
        FAILPT("hypervisor entries inconsistent");
    }

    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
          l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
        FAILPT("hypervisor linear map inconsistent");
#endif

    match = l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
    if ( !shadow_mode_external(d) &&
         l2e_has_changed(spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
                         match, PAGE_FLAG_MASK))
    {
        FAILPT("hypervisor shadow linear map inconsistent %" PRIpte " %" PRIpte,
               l2e_get_intpte(spl2e[SH_LINEAR_PT_VIRT_START >>
                                   L2_PAGETABLE_SHIFT]),
               l2e_get_intpte(match));
    }

    match = l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
    if ( !shadow_mode_external(d) &&
         l2e_has_changed(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
                         match, PAGE_FLAG_MASK))
    {
        FAILPT("hypervisor per-domain map inconsistent saw %" PRIpte ", expected (va=%p) %" PRIpte,
               l2e_get_intpte(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
               d->arch.mm_perdomain_pt,
               l2e_get_intpte(match));
    }

#if CONFIG_PAGING_LEVELS == 2
    if ( shadow_mode_external(d) )
        limit = L2_PAGETABLE_ENTRIES;
    else
        limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
#else
    limit = 0; /* XXX x86/64 XXX */
#endif

    /* Check the guest-visible part of the L2 (the whole table in external mode). */
    for ( i = 0; i < limit; i++ )
        errors += check_pte(v,
                            (l1_pgentry_t*)(&gpl2e[i]), /* Hmm, dirty ... */
                            (l1_pgentry_t*)(&spl2e[i]),
                            NULL,
                            2, i, 0);

    unmap_domain_page(spl2e);
    unmap_domain_page(gpl2e);

#if 1
    if ( errors )
        printk("check_l2_table returning %d errors\n", errors);
#endif

    return errors;
}
#undef FAILPT

int _check_pagetable(struct vcpu *v, char *s)
{
    struct domain *d = v->domain;
#if CONFIG_PAGING_LEVELS == 4
    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)?
                      v->arch.guest_table : v->arch.guest_table_user);
#else
    pagetable_t pt = v->arch.guest_table;
#endif
    unsigned long gptbase = pagetable_get_paddr(pt);
    unsigned long ptbase_pfn, smfn;
    unsigned long i;
    l2_pgentry_t *gpl2e, *spl2e;
    unsigned long ptbase_mfn = 0;
    int errors = 0, limit, oos_pdes = 0;

    //_audit_domain(d, AUDIT_QUIET);
    shadow_lock(d);

    sh_check_name = s;
    //SH_VVLOG("%s-PT Audit", s);
    sh_l2_present = sh_l1_present = 0;
    perfc_incrc(check_pagetable);

    ptbase_mfn = gptbase >> PAGE_SHIFT;
    ptbase_pfn = mfn_to_gmfn(d, ptbase_mfn);

    if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
    {
        printk("%s-PT %lx not shadowed\n", s, gptbase);
        goto out;
    }
    if ( page_out_of_sync(mfn_to_page(ptbase_mfn)) )
    {
        ptbase_mfn = __shadow_status(d, ptbase_pfn, PGT_snapshot);
        oos_pdes = 1;
        ASSERT(ptbase_mfn);
    }

    errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);

    gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn);
    spl2e = (l2_pgentry_t *) map_domain_page(smfn);

    /* Go back and recurse. */
#if CONFIG_PAGING_LEVELS == 2
    if ( shadow_mode_external(d) )
        limit = L2_PAGETABLE_ENTRIES;
    else
        limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
#else
    limit = 0; /* XXX x86/64 XXX */
#endif

    for ( i = 0; i < limit; i++ )
    {
        unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
        unsigned long gl1mfn = gmfn_to_mfn(d, gl1pfn);
        unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);

        if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
        {
            errors += check_l1_table(v, gl1pfn, gl1mfn, sl1mfn, i);
        }
    }

    unmap_domain_page(spl2e);
    unmap_domain_page(gpl2e);

#if 0
    SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
             sh_l2_present, sh_l1_present);
#endif

 out:
    if ( errors )
        BUG();

    shadow_unlock(d);

    return errors;
}

int _check_all_pagetables(struct vcpu *v, char *s)
{
    struct domain *d = v->domain;
    int i;
    struct shadow_status *a;
    unsigned long gmfn;
    int errors = 0;

    shadow_status_noswap = 1;

    sh_check_name = s;
    SH_VVLOG("%s-PT Audit domid=%d", s, d->domain_id);
    sh_l2_present = sh_l1_present = 0;
    perfc_incrc(check_all_pagetables);

    for (i = 0; i < shadow_ht_buckets; i++)
    {
        a = &d->arch.shadow_ht[i];
        while ( a && a->gpfn_and_flags )
        {
            gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);

            switch ( a->gpfn_and_flags & PGT_type_mask )
            {
            case PGT_l1_shadow:
                errors += check_l1_table(v, a->gpfn_and_flags & PGT_mfn_mask,
                                         gmfn, a->smfn, 0);
                break;
            case PGT_l2_shadow:
                errors += check_l2_table(v, gmfn, a->smfn,
                                         page_out_of_sync(mfn_to_page(gmfn)));
                break;
            case PGT_l3_shadow:
            case PGT_l4_shadow:
            case PGT_hl2_shadow:
                BUG(); // XXX - ought to fix this...
                break;
            case PGT_snapshot:
            case PGT_writable_pred:
                break;
            default:
                errors++;
                printk("unexpected shadow type %lx, gpfn=%lx, "
                       "gmfn=%lx smfn=%lx\n",
                       a->gpfn_and_flags & PGT_type_mask,
                       a->gpfn_and_flags & PGT_mfn_mask,
                       gmfn, a->smfn);
                BUG();
            }
            a = a->next;
        }
    }

    shadow_status_noswap = 0;

    if ( errors )
        BUG();

    return errors;
}

#endif // SHADOW_DEBUG
#endif // this code has not been updated for 32-bit PAE & 64-bit modes

#if CONFIG_PAGING_LEVELS >= 3
/****************************************************************************/
/* 64-bit shadow-mode code testing */
/****************************************************************************/
/*
 * init_bl2() is for a 32-bit VMX guest on a 64-bit host.
 * It uses one shadow L4 (doubling as the L3) and four shadow L2s to
 * simulate the guest L2.
 */
static inline unsigned long init_bl2(
    struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
    unsigned int count;
    unsigned long sl2mfn;
    unsigned long smfn;
    struct page_info *page;
    l4_pgentry_t *spl4e;
    void *l2;

    if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l4_shadow))) )
    {
        printk("Couldn't alloc an L4 shadow for pfn=%lx mfn=%lx\n", gpfn, gmfn);
        BUG(); /* XXX Deal gracefully with failure. */
    }

    spl4e = (l4_pgentry_t *)map_domain_page(smfn);

    /* Map the self entry; the L4 and L3 share the same page */
    spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);

    /* Allocate 4 shadow L2s */
    page = alloc_domheap_pages(NULL, SL2_ORDER, 0);
    if ( !page )
        domain_crash_synchronous();

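    /*
     * Each shadow L2 stands in for one PAE L3 slot, i.e. 1GB of the 32-bit
     * guest's 4GB address space.
     */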
    for ( count = 0; count < PAE_L3_PAGETABLE_ENTRIES; count++ )
    {
        sl2mfn = page_to_mfn(page+count);
        l2 = map_domain_page(sl2mfn);
        memset(l2, 0, PAGE_SIZE);
        unmap_domain_page(l2);
        spl4e[count] = l4e_from_pfn(sl2mfn, _PAGE_PRESENT);
    }

    unmap_domain_page(spl4e);

    return smfn;
}
#endif

#if CONFIG_PAGING_LEVELS == 3
static unsigned long shadow_l3_table(
    struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
    unsigned long smfn;
    l3_pgentry_t *spl3e;

    perfc_incrc(shadow_l3_table_count);

    SH_VVLOG("shadow_l4_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);

    if ( SH_L1_HAS_NEXT_PAGE &&
         d->arch.ops->guest_paging_levels == PAGING_L2 )
    {
        return init_bl2(d, gpfn, gmfn);
    }

    if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l3_shadow))) )
    {
            printk("Couldn't alloc an L3 shadow for pfn=%lx mfn=%lx\n", gpfn, gmfn);
            BUG(); /* XXX Deal gracefully with failure. */
    }

    spl3e = (l3_pgentry_t *)map_domain_page(smfn);

    /* Make the self entry */
    spl3e[PAE_SHADOW_SELF_ENTRY] = l3e_from_pfn(smfn, __PAGE_HYPERVISOR);

    if ( (PGT_base_page_table == PGT_l3_page_table) &&
         !shadow_mode_external(d) ) {
        int i;
        unsigned long g2mfn, s2mfn;
        l2_pgentry_t *spl2e;
        l3_pgentry_t *gpl3e;

        /* Get the top entry */
        gpl3e = (l3_pgentry_t *)map_domain_page(gmfn);

        if ( !(l3e_get_flags(gpl3e[L3_PAGETABLE_ENTRIES - 1]) & _PAGE_PRESENT) )
        {
            BUG();
        }

        g2mfn = l3e_get_pfn(gpl3e[L3_PAGETABLE_ENTRIES - 1]);

        /* NB. g2mfn should be the same as g2pfn */
        if (!(s2mfn = __shadow_status(d, g2mfn, PGT_l2_shadow))) {
            if ( unlikely(!(s2mfn =
                    alloc_shadow_page(d, g2mfn, g2mfn, PGT_l2_shadow))) ) {
                printk("Couldn't alloc an L2 shadow for pfn=%lx mfn=%lx\n",
                    g2mfn, g2mfn);
                BUG(); /* XXX Deal gracefully with failure. */
            }
        } 

        if (!get_shadow_ref(s2mfn))
            BUG();
            
        /* Map shadow L2 into shadow L3 */
        spl3e[L3_PAGETABLE_ENTRIES - 1] = l3e_from_pfn(s2mfn, _PAGE_PRESENT);
        shadow_update_min_max(smfn, L3_PAGETABLE_ENTRIES -1);

        /*
         * Xen private mappings. Do the same things that
         * create_pae_xen_mappings() does.
         */
        spl2e = (l2_pgentry_t *)map_domain_page(s2mfn);

        /*
         * When we free L2 pages, we need to be able to tell whether the page
         * contains Xen private mappings. Use the va_mask part of type_info
         * to record this.
         */
        mfn_to_page(s2mfn)->u.inuse.type_info |= 
            (unsigned long) 3 << PGT_score_shift; 

        memset(spl2e, 0, 
               (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)) * sizeof(l2_pgentry_t));

        memcpy(&spl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));       

        for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
            spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
                l2e_from_page(
                    virt_to_page(page_get_owner(mfn_to_page(gmfn))->arch.mm_perdomain_pt) + i, 
                    __PAGE_HYPERVISOR);
        for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
            spl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
                (l3e_get_flags(gpl3e[i]) & _PAGE_PRESENT) ?
                l2e_from_pfn(l3e_get_pfn(gpl3e[i]), __PAGE_HYPERVISOR) :
                l2e_empty();
       
        unmap_domain_page(spl2e);
        unmap_domain_page(gpl3e);
    }
    unmap_domain_page(spl3e);

    return smfn;
}
#endif /* CONFIG_PAGING_LEVELS == 3 */

#ifndef GUEST_PGENTRY_32
static unsigned long gva_to_gpa_pae(unsigned long gva)
{
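    /* Not implemented for PAE guests; the return value after BUG() only
     * placates the compiler. */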
    BUG();
    return 43;
}
#endif

#if CONFIG_PAGING_LEVELS == 4
static unsigned long shadow_l4_table(
  struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
    unsigned long smfn;
    l4_pgentry_t *spl4e;

    SH_VVLOG("shadow_l4_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);

    perfc_incrc(shadow_l4_table_count);

    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
    {
        return init_bl2(d, gpfn, gmfn);
    }

    if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l4_shadow))) )
    {
        printk("Couldn't alloc an L4 shadow for pfn=%lx mfn=%lx\n", gpfn, gmfn);
        BUG(); /* XXX Deal gracefully with failure. */
    }

    spl4e = (l4_pgentry_t *)map_domain_page(smfn);

    /* Install hypervisor and 4x linear p.t. mappings. */
    if ( (PGT_base_page_table == PGT_l4_page_table) &&
      !shadow_mode_external(d) )
    {
        /*
         * We could proactively fill in PDEs for pages that are already
         * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
         * (restriction required for coherence of the accessed bit). However,
         * we tried it and it didn't help performance. This is simpler.
         */
        memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));

        /* Install hypervisor and 2x linear p.t. mappings. */
        memcpy(&spl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));

        spl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
            l4e_from_paddr(__pa(page_get_owner(mfn_to_page(gmfn))->arch.mm_perdomain_l3),
                            __PAGE_HYPERVISOR);

        if ( shadow_mode_translate(d) ) // NB: not external
        {
            spl4e[l4_table_offset(RO_MPT_VIRT_START)] =
                l4e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
                                __PAGE_HYPERVISOR);
        }
        else
            spl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
                l4e_from_pfn(gmfn, __PAGE_HYPERVISOR);

    } else
        memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));

    unmap_domain_page(spl4e);

    ESH_LOG("shadow_l4_table(%lx -> %lx)", gmfn, smfn);
    return smfn;
}
#endif /* CONFIG_PAGING_LEVELS == 4 */

#if CONFIG_PAGING_LEVELS >= 3
/*
 * validate_bl2e_change()
 * This code is for a 32-bit HVM guest on a 64-bit host.
 * It syncs the guest L2.
 */

static inline void
validate_bl2e_change(
    struct domain *d,
    guest_root_pgentry_t *new_gle_p,
    pgentry_64_t *shadow_l3,
    int index)
{
    int sl3_idx, sl2_idx;
    unsigned long sl2mfn, sl1mfn;
    pgentry_64_t *sl2_p;

    /* Use the guest L2 PTE index to derive the shadow L3 and L2 indices.
     * index: 0 ~ 1023, PAGETABLE_ENTRIES: 512
     */
    sl3_idx = index / (PAGETABLE_ENTRIES / 2);
    sl2_idx = (index % (PAGETABLE_ENTRIES / 2)) * 2;
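    /*
     * One 4MB guest L2 entry is shadowed by a pair of 2MB entries, e.g.
     * guest index 1023 -> shadow L3 slot 3, shadow L2 slots 510 and 511.
     */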

    sl2mfn = entry_get_pfn(shadow_l3[sl3_idx]);
    sl2_p = (pgentry_64_t *)map_domain_page(sl2mfn);

    validate_pde_change(
        d, *(guest_l2_pgentry_t *)new_gle_p, (l2_pgentry_t *)&sl2_p[sl2_idx]);

    /* Map the second L1 shadow page */
    if (entry_get_flags(sl2_p[sl2_idx]) & _PAGE_PRESENT) {
        sl1mfn = entry_get_pfn(sl2_p[sl2_idx]);
        sl2_p[sl2_idx + 1] =
            entry_from_pfn(sl1mfn + 1, entry_get_flags(sl2_p[sl2_idx]));
    }
    else
        sl2_p[sl2_idx + 1] = (pgentry_64_t){0};
    unmap_domain_page(sl2_p);

}

/*
 * This variant of shadow_mark_va_out_of_sync() is for the 2M page shadow.
 */
static void shadow_mark_va_out_of_sync_2mp(
  struct vcpu *v, unsigned long gpfn, unsigned long mfn, unsigned long writable_pl1e)
{
    struct out_of_sync_entry *entry =
      shadow_mark_mfn_out_of_sync(v, gpfn, mfn);

    entry->writable_pl1e = writable_pl1e;
    ESH_LOG("<shadow_mark_va_out_of_sync_2mp> gpfn = %lx\n", gpfn);
    if ( !get_shadow_ref(writable_pl1e >> L1_PAGETABLE_SHIFT) )
        BUG();
}

static int get_shadow_mfn(struct domain *d, unsigned long gpfn, unsigned long *spmfn, u32 flag)
{
    unsigned long gmfn;
    if ( !(*spmfn = __shadow_status(d, gpfn, flag)) )
    {
        /* This is NOT already shadowed so we need to shadow it. */
        SH_VVLOG("<get_shadow_mfn>: not shadowed");

        gmfn = gmfn_to_mfn(d, gpfn);
        if ( unlikely(!VALID_MFN(gmfn)) )
        {
            // Attempt to use an invalid pfn as a shadow page.
            // XXX this needs to be more graceful!
            BUG();
        }

        if ( unlikely(!(*spmfn =
                  alloc_shadow_page(d, gpfn, gmfn, flag))) )
        {
            printk("<get_shadow_mfn>Couldn't alloc an shadow for pfn=%lx mfn=%lx\n", gpfn, gmfn);
            BUG(); /* XXX Need to deal gracefully with failure. */
        }
        switch(flag) {
            case PGT_l1_shadow:
                perfc_incrc(shadow_l1_table_count);
                break;
            case PGT_l2_shadow:
                perfc_incrc(shadow_l2_table_count);
                break;
            case PGT_l3_shadow:
                perfc_incrc(shadow_l3_table_count);
                break;
            case PGT_hl2_shadow:
                perfc_incrc(shadow_hl2_table_count);
                break;
        }

        return 1;
    } else {
        /* This L1 is shadowed already, but the L2 entry is missing. */
        SH_VVLOG("4b: was shadowed, l2 missing (%lx)", *spmfn);
        return 0;
    }
}

static void shadow_map_into_current(struct vcpu *v,
  unsigned long va, unsigned int from, unsigned int to)
{
    pgentry_64_t gle = {0}, sle;
    unsigned long gpfn, smfn;

    if (from == PAGING_L1 && to == PAGING_L2) {
        shadow_map_l1_into_current_l2(va);
        return;
    }

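    /*
     * General case: fetch the guest entry at level 'to', make sure a shadow
     * of the level-'from' table it references exists, take a reference on
     * it, then write back both the guest entry and the new shadow entry.
     */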
    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to);
    ASSERT(entry_get_flags(gle) & _PAGE_PRESENT);
    gpfn = entry_get_pfn(gle);

    get_shadow_mfn(v->domain, gpfn, &smfn, shadow_level_to_type(from));

    if ( !get_shadow_ref(smfn) )
        BUG();
    entry_general(v->domain, &gle, &sle, smfn, to);
    __rw_entry(v, va, &gle, GUEST_ENTRY | SET_ENTRY | to);
    __rw_entry(v, va, &sle, SHADOW_ENTRY | SET_ENTRY | to);
}

/*
 * shadow_set_lxe should be put in shadow.h
 */
static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e,
  int create_l2_shadow, int put_ref_check)
{
    struct vcpu *v = current;
    l4_pgentry_t sl4e;
    l3_pgentry_t sl3e;

    __shadow_get_l4e(v, va, &sl4e);
    if (!(l4e_get_flags(sl4e) & _PAGE_PRESENT)) {
        if (create_l2_shadow) {
            perfc_incrc(shadow_set_l3e_force_map);
            shadow_map_into_current(v, va, PAGING_L3, PAGING_L4);
            __shadow_get_l4e(v, va, &sl4e);
        } else {
            printk("For non HVM shadow, create_l1_shadow:%d\n", create_l2_shadow);
        }
    }

    __shadow_get_l3e(v, va, &sl3e);
    if (!(l3e_get_flags(sl3e) & _PAGE_PRESENT)) {
        if (create_l2_shadow) {
            perfc_incrc(shadow_set_l2e_force_map);
            shadow_map_into_current(v, va, PAGING_L2, PAGING_L3);
            __shadow_get_l3e(v, va, &sl3e);
        } else {
            printk("For non-HVM shadow, create_l2_shadow:%d\n", create_l2_shadow);
        }
        shadow_update_min_max(l4e_get_pfn(sl4e), l3_table_offset(va));
    }

    if ( put_ref_check ) {
        l2_pgentry_t tmp_sl2e;
        if ( __shadow_get_l2e(v, va, &tmp_sl2e) ) {
            if ( l2e_get_flags(tmp_sl2e) & _PAGE_PRESENT )
                if ( l2e_get_pfn(tmp_sl2e) == l2e_get_pfn(sl2e) ) {
                    put_shadow_ref(l2e_get_pfn(sl2e));
                }
        }

    }

    if ( !__shadow_set_l2e(v, va, &sl2e) )
        BUG();
    shadow_update_min_max(l3e_get_pfn(sl3e), l2_table_offset(va));
}


/* As 32-bit guests don't support 4M pages yet,
 * we don't need to compile two variants of this function.
 */
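/*
 * Splinter a guest large-page (PSE) mapping into a full PGT_fl1_shadow page
 * of 4K shadow L1 entries and hook that page into the shadow L2.
 */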
static inline int l2e_rw_fault(
    struct vcpu *v, l2_pgentry_t *gl2e_p, unsigned long va, int rw)
{
    struct domain *d = v->domain;
    l2_pgentry_t gl2e = *gl2e_p;
    l2_pgentry_t tmp_l2e = gl2e;
    unsigned long start_gpfn = l2e_get_pfn(gl2e);
    unsigned long gpfn, mfn;
    unsigned long l1_mfn, gmfn;
    l1_pgentry_t *l1_p;
    l1_pgentry_t sl1e;
    l1_pgentry_t old_sl1e;
    l2_pgentry_t sl2e;
    u64 nx = 0;
    int put_ref_check = 0;
    /* Check if gpfn is 2M aligned */

    /* Update guest l2e */
    if (rw) {
        ASSERT(l2e_get_flags(gl2e) & _PAGE_RW);
        l2e_add_flags(gl2e, _PAGE_DIRTY | _PAGE_ACCESSED);
    } else {
        l2e_add_flags(gl2e, _PAGE_ACCESSED);
    }

    l2e_remove_flags(tmp_l2e, _PAGE_PSE);
    if (l2e_get_flags(gl2e) & _PAGE_NX) {
        l2e_remove_flags(tmp_l2e, _PAGE_NX);
        nx = 1ULL << 63;
    }


    /* Get the shadow l2 first */
    if ( !__shadow_get_l2e(v, va, &sl2e) )
        sl2e = l2e_empty();

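    /*
     * The NX bit is folded into the gpfn key (bit 63) used to look up and
     * allocate the PGT_fl1_shadow below.
     */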
    l1_mfn = __shadow_status(d, start_gpfn | nx, PGT_fl1_shadow);

    /* Check the corresponding l2e */
    if (l1_mfn) {
        /* Why is it already PRESENT? */
        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) &&
                l2e_get_pfn(sl2e) == l1_mfn) {
            ESH_LOG("sl2e PRSENT bit is set: %lx, l1_mfn = %lx\n", l2e_get_pfn(sl2e), l1_mfn);
        } else {
            put_ref_check = 1;
            if (!get_shadow_ref(l1_mfn))
                BUG();
        }
        l1_p = (l1_pgentry_t *)map_domain_page(l1_mfn);
        sl2e = l2e_from_pfn(l1_mfn, l2e_get_flags(tmp_l2e));
    } else {
        /* Allocate a new page as the shadow page table if needed */
        gmfn = gmfn_to_mfn(d, start_gpfn);
        l1_mfn = alloc_shadow_page(d, start_gpfn | nx, gmfn, PGT_fl1_shadow);
        if (unlikely(!l1_mfn)) {
            BUG();
        }

        if (!get_shadow_ref(l1_mfn))
            BUG();
        l1_p = (l1_pgentry_t *)map_domain_page(l1_mfn);
        sl2e = l2e_from_pfn(l1_mfn, l2e_get_flags(tmp_l2e));
        memset(l1_p, 0, PAGE_SIZE);
        ESH_LOG("Alloc a shadow page: %lx\n", l1_mfn);
    }

    ESH_LOG("<%s>: sl2e = %lx\n", __func__, l2e_get_intpte(sl2e));
    /* Map the page into the L2 */
    shadow_set_l2e_64(va, sl2e, 1, put_ref_check);

    if (l2e_get_flags(gl2e) & _PAGE_NX)
        l2e_add_flags(tmp_l2e, _PAGE_NX);

    /* Propagate the shadow page table, i.e. set the sl1e entries */
    for (gpfn = start_gpfn;
      gpfn < (start_gpfn + L1_PAGETABLE_ENTRIES); gpfn++) {

        mfn = gmfn_to_mfn(d, gpfn);

        if ( unlikely(!VALID_MFN(mfn)) )
        {
            continue;
        }

        sl1e = l1e_from_pfn(mfn, l2e_get_flags(tmp_l2e));

        if (!rw) {
            if ( shadow_mode_log_dirty(d) ||
              !(l2e_get_flags(gl2e) & _PAGE_DIRTY) || mfn_is_page_table(mfn) )
            {
                l1e_remove_flags(sl1e, _PAGE_RW);
            }
        } else {
            /* __mark_dirty(d, gmfn); */
        }
       // printk("<%s> gpfn: %lx, mfn: %lx, sl1e: %lx\n", __func__, gpfn, mfn, l1e_get_intpte(sl1e));
        /* The shadow entries need to be set up before shadow_mark_va_out_of_sync() */
        old_sl1e = l1_p[gpfn - start_gpfn];

        if ( l1e_has_changed(old_sl1e, sl1e, _PAGE_RW | _PAGE_PRESENT) )
        {
            if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
              !shadow_get_page_from_l1e(sl1e, d) ) {
                ESH_LOG("%lx, mfn: %lx why make me empty, start_pfn: %lx, gpfn: %lx\n", l1e_get_intpte(sl1e),mfn, start_gpfn, gpfn);
                sl1e = l1e_empty();
            }
            if ( l1e_get_flags(old_sl1e) & _PAGE_PRESENT )
                put_page_from_l1e(old_sl1e, d);
        }

        l1_p[gpfn - start_gpfn] = sl1e;

        if (rw) {
            /* shadow_mark_va_out_of_sync() needs modification for 2M pages */
            if ( mfn_is_page_table(mfn) )
                shadow_mark_va_out_of_sync_2mp(v, gpfn, mfn,
                  l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * (gpfn - start_gpfn)));
        }
    }

    unmap_domain_page(l1_p);
    return 1;

}

/*
 * Check P, R/W, U/S bits in the guest page table.
 * If the fault belongs to guest return 1,
 * else return 0.
 */
#if defined( GUEST_PGENTRY_32 )
static inline int guest_page_fault(
    struct vcpu *v,
    unsigned long va, unsigned int error_code,
    guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
{
    /* The following checks are for a 32-bit guest on a 64-bit host */

    __guest_get_l2e(v, va, gpl2e);

    /* Check the guest L2 page-table entry first */
    if ( unlikely(!(guest_l2e_get_flags(*gpl2e) & _PAGE_PRESENT)) )
        return 1;

    if ( error_code & ERROR_W ) 
    {
        if ( unlikely(!(guest_l2e_get_flags(*gpl2e) & _PAGE_RW)) )
            return 1;
    }

    if ( error_code & ERROR_U ) 
    {
        if ( unlikely(!(guest_l2e_get_flags(*gpl2e) & _PAGE_USER)) )
            return 1;
    }

    if ( guest_l2e_get_flags(*gpl2e) & _PAGE_PSE )
        return 0;

    __guest_get_l1e(v, va, gpl1e);

    /* Then check the guest L1 page-table entry */
    if ( unlikely(!(guest_l1e_get_flags(*gpl1e) & _PAGE_PRESENT)) )
        return 1;

    if ( error_code & ERROR_W ) 
    {
        if ( unlikely(!(guest_l1e_get_flags(*gpl1e) & _PAGE_RW)) )
            return 1;
    }

    if ( error_code & ERROR_U ) 
    {
        if ( unlikely(!(guest_l1e_get_flags(*gpl1e) & _PAGE_USER)) )
            return 1;
    }

    return 0;
}
#else
static inline int guest_page_fault(
    struct vcpu *v,
    unsigned long va, unsigned int error_code,
    guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
{
    struct domain *d = v->domain;
    pgentry_64_t gle = { 0 };
    unsigned long gpfn = 0, mfn;
    int i;

    ASSERT( d->arch.ops->guest_paging_levels >= PAGING_L3 );

#if CONFIG_PAGING_LEVELS == 4
    if ( d->arch.ops->guest_paging_levels == PAGING_L4 ) 
    {
        __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | PAGING_L4);
        if ( unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)) )
            return 1;

        if ( error_code & ERROR_W )
        {
            if ( unlikely(!(entry_get_flags(gle) & _PAGE_RW)) )
                return 1;
        }

        if ( error_code & ERROR_U )
        {
            if ( unlikely(!(entry_get_flags(gle) & _PAGE_USER)) )
                return 1;
        }
        gpfn = entry_get_pfn(gle);
    }
#endif

#if CONFIG_PAGING_LEVELS >= 3
    if ( d->arch.ops->guest_paging_levels == PAGING_L3 ) 
    {
        gpfn = pagetable_get_pfn(v->arch.guest_table);
    }
#endif

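    /*
     * Walk the guest page table from L3 down to L1, checking _PAGE_PRESENT
     * at every level and, below L3, the R/W and U/S bits against the fault's
     * error code; a PSE L2 entry (large page) ends the walk.
     */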
    for ( i = PAGING_L3; i >= PAGING_L1; i-- ) 
    {
        pgentry_64_t *lva;
        /*
         * If it's not external mode, then mfn should be machine physical.
         */
        mfn = gmfn_to_mfn(d, gpfn);

        lva = (pgentry_64_t *) map_domain_page(mfn);
        gle = lva[table_offset_64(va, i)];
        unmap_domain_page(lva);

        gpfn = entry_get_pfn(gle);

        if ( unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)) )
            return 1;

        if ( i < PAGING_L3 ) 
        {
            if ( error_code & ERROR_W ) 
            {
                if ( unlikely(!(entry_get_flags(gle) & _PAGE_RW)) ) 
                {
                    if ( i == PAGING_L1 )
                        if ( gpl1e )
                            gpl1e->l1 = gle.lo;
                    return 1;
                }
            }
            if ( error_code & ERROR_U ) 
            {
                if ( unlikely(!(entry_get_flags(gle) & _PAGE_USER)) )
                    return 1;
            }
        }

        if ( i == PAGING_L2 ) 
        {
            if ( gpl2e )
                gpl2e->l2 = gle.lo;
            if ( likely(entry_get_flags(gle) & _PAGE_PSE) )
                return 0;
        }

        if ( i == PAGING_L1 )
            if ( gpl1e )
                gpl1e->l1 = gle.lo;
    }

    return 0;

}
#endif

static int shadow_fault_64(unsigned long va, struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    guest_l2_pgentry_t gl2e;
    guest_l1_pgentry_t gl1e, orig_gl1e;
    l1_pgentry_t sl1e;

    gl1e = guest_l1e_empty(); gl2e = guest_l2e_empty();

    sl1e = l1e_empty();

    perfc_incrc(shadow_fault_calls);

    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n",
            va, regs->eip, regs->error_code);

    /*
     * Don't let someone else take the guest's table pages out-of-sync.
     */
    shadow_lock(d);

    /*
     * STEP 1. Check to see if this fault might have been caused by an
     *         out-of-sync table page entry, or if we should pass this
     *         fault onto the guest.
     */
    __shadow_sync_va(v, va);

    /*
     * STEP 2. Check if the fault belongs to guest
     */
    if ( guest_page_fault(v, va, regs->error_code, &gl2e, &gl1e) ) 
    {
        if ( unlikely(shadow_mode_log_dirty(d)) && l1e_get_intpte(gl1e) != 0 )
            goto check_writeable;
        
        goto fail;
    }

    if ( unlikely((guest_l2e_get_flags(gl2e) & _PAGE_PSE)) ) 
        goto pse;

    /*
     * Handle 4K pages here
     */
check_writeable:
    orig_gl1e = gl1e;
    
    /* Write fault? */
    if ( regs->error_code & 2 ) 
    {
        int allow_writes = 0;

        if ( unlikely(!(guest_l1e_get_flags(gl1e) & _PAGE_RW)) )
        {
            if ( shadow_mode_page_writable(va, regs, l1e_get_pfn(gl1e)) )
            {
                allow_writes = 1;
                l1e_add_flags(gl1e, _PAGE_RW);
            }
            else
            {
                /* Write fault on a read-only mapping. */
                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 
                         l1e_get_intpte(gl1e));
                perfc_incrc(shadow_fault_bail_ro_mapping);
                goto fail;
            }
        }

        if ( !l1pte_write_fault(v, &gl1e, &sl1e, va) ) 
        {
            SH_VVLOG("shadow_fault - EXIT: l1pte_write_fault failed");
            perfc_incrc(write_fault_bail);
            shadow_unlock(d);
            return 0;
        }
 
        if (allow_writes)
            l1e_remove_flags(gl1e, _PAGE_RW);
    }
    else 
    {
        if ( !l1pte_read_fault(d, &gl1e, &sl1e) )
        {
            SH_VVLOG("shadow_fault - EXIT: l1pte_read_fault failed");
            perfc_incrc(read_fault_bail);
            shadow_unlock(d);
            return 0;
        }
    }

    /*
     * STEP 3. Write the modified shadow PTE and guest PTE back to the tables
     */
    if ( l1e_has_changed(orig_gl1e, gl1e, PAGE_FLAG_MASK) )
    {
        if (unlikely(!__guest_set_l1e(v, va, &gl1e))) 
            domain_crash_synchronous();

        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gl2e)));
    }

    shadow_set_l1e_64(va, (pgentry_64_t *)&sl1e, 1);

    perfc_incrc(shadow_fault_fixed);
    d->arch.shadow_fault_count++;

    shadow_unlock(d);

    return EXCRET_fault_fixed;

pse:
    /*
     * Handle 2M pages here
     */
    if ( unlikely(!shadow_mode_external(d)) )
        BUG();

    /* Write fault? */
    if ( regs->error_code & 2 ) 
    {
        if ( !l2e_rw_fault(v, (l2_pgentry_t *)&gl2e, va, WRITE_FAULT) ) 
        {
            goto fail;
        }
    } 
    else 
    {
        l2e_rw_fault(v, (l2_pgentry_t *)&gl2e, va, READ_FAULT);
    }

    /*
     * STEP 3. Write guest/shadow l2e back
     */

    if ( unlikely(!__guest_set_l2e(v, va, &gl2e)) ) 
    {
        domain_crash_synchronous();
    }

    /*
     * Todo: if necessary, record the page table page as dirty
     */

    perfc_incrc(shadow_fault_fixed);
    d->arch.shadow_fault_count++;

    shadow_unlock(d);

    return EXCRET_fault_fixed;
fail:
    shadow_unlock(d);
    ESH_LOG("Guest fault~~~\n");
    return 0;
}

static void shadow_invlpg_64(struct vcpu *v, unsigned long va)
{
    struct domain *d = v->domain;
    l1_pgentry_t  sl1e, old_sl1e;

    shadow_lock(d);

    __shadow_sync_va(v, va);

    if ( shadow_mode_external(d) && __shadow_get_l1e(v, va, &old_sl1e) )
        if ( l1e_get_flags(old_sl1e) & _PAGE_PRESENT )
            put_page_from_l1e(old_sl1e, d);

    sl1e = l1e_empty();
    __shadow_set_l1e(v, va, &sl1e);

    shadow_unlock(d);
}

static unsigned long gva_to_gpa_64(unsigned long gva)
{
    struct vcpu *v = current;
    guest_l1_pgentry_t gl1e = {0};
    guest_l2_pgentry_t gl2e = {0};
    unsigned long gpa;

    if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
        return 0;

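    /* For a PSE (large page) mapping take the offset within the large page
     * from the virtual address; otherwise use the normal 4K page offset. */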
    if (guest_l2e_get_flags(gl2e) & _PAGE_PSE)
        gpa = guest_l2e_get_paddr(gl2e) + (gva & ((1 << GUEST_L2_PAGETABLE_SHIFT) - 1));
    else
        gpa = guest_l1e_get_paddr(gl1e) + (gva & ~PAGE_MASK);

    return gpa;
}

/*
 * The naming convention of the shadow_ops:
 * MODE_<pgentry size>_<guest paging levels>_HANDLER
 */
#ifndef GUEST_PGENTRY_32
struct shadow_ops MODE_64_3_HANDLER = {
    .guest_paging_levels        = 3,
    .invlpg                     = shadow_invlpg_64,
    .fault                      = shadow_fault_64,
    .update_pagetables          = shadow_update_pagetables,
    .sync_all                   = sync_all,
    .remove_all_write_access    = remove_all_write_access,
    .do_update_va_mapping       = do_update_va_mapping,
    .mark_mfn_out_of_sync       = mark_mfn_out_of_sync,
    .is_out_of_sync             = is_out_of_sync,
    .gva_to_gpa                 = gva_to_gpa_pae,
};

struct shadow_ops MODE_64_4_HANDLER = {
    .guest_paging_levels        = 4,
    .invlpg                     = shadow_invlpg_64,
    .fault                      = shadow_fault_64,
    .update_pagetables          = shadow_update_pagetables,
    .sync_all                   = sync_all,
    .remove_all_write_access    = remove_all_write_access,
    .do_update_va_mapping       = do_update_va_mapping,
    .mark_mfn_out_of_sync       = mark_mfn_out_of_sync,
    .is_out_of_sync             = is_out_of_sync,
    .gva_to_gpa                 = gva_to_gpa_64,
};
#endif /* GUEST_PGENTRY_32 */
#endif /* CONFIG_PAGING_LEVELS >= 3 */


#if CONFIG_PAGING_LEVELS == 2
struct shadow_ops MODE_32_2_HANDLER = {
    .guest_paging_levels        = 2,
    .invlpg                     = shadow_invlpg_32,
    .fault                      = shadow_fault_32,
    .update_pagetables          = shadow_update_pagetables,
    .sync_all                   = sync_all,
    .remove_all_write_access    = remove_all_write_access,
    .do_update_va_mapping       = do_update_va_mapping,
    .mark_mfn_out_of_sync       = mark_mfn_out_of_sync,
    .is_out_of_sync             = is_out_of_sync,
    .gva_to_gpa                 = gva_to_gpa_64,
};
#endif

#if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) ) ||  \
    ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )

/*
 * Use GUEST_PGENTRY_32 to force PAE_SHADOW_SELF_ENTRY for L4.
 *
 * Very simple shadow code to handle the 1:1 direct mapping for guest
 * non-paging code, which actually runs in PAE/vm86 mode with paging
 * enabled.
 *
 * We expect that the top-level (L3) page has been allocated and initialized.
 */
int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    l3_pgentry_t sl3e;
    l2_pgentry_t sl2e;
    l1_pgentry_t sl1e;
    unsigned long mfn, smfn;
    struct page_info *page;

    /*
     * If the faulting address is within the MMIO range (there is no backing
     * MFN), fail here so the #PF continues to be handled as an MMIO access.
     */
    if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
    {
         goto fail;
    }

    shadow_lock(d);

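    /*
     * Build the 1:1 mapping lazily: allocate the missing intermediate shadow
     * tables on demand and point the final L1 entry straight at the machine
     * frame backing vpa.
     */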
    __shadow_get_l3e(v, vpa, &sl3e);

    if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) ) 
    {
        page = alloc_domheap_page(NULL);
        if ( !page )
        {
            /* Don't leak the shadow lock on this failure path. */
            shadow_unlock(d);
            goto fail;
        }
        smfn = page_to_mfn(page);
        sl3e = l3e_from_pfn(smfn, _PAGE_PRESENT);
        __shadow_set_l3e(v, vpa, &sl3e);
    }

    __shadow_get_l2e(v, vpa, &sl2e);

    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) ) 
    {
        page = alloc_domheap_page(NULL);
        if ( !page )
        {
            /* Don't leak the shadow lock on this failure path. */
            shadow_unlock(d);
            goto fail;
        }
        smfn = page_to_mfn(page);

        sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
        __shadow_set_l2e(v, vpa, &sl2e);
    }

    __shadow_get_l1e(v, vpa, &sl1e);

    if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) ) 
    {
        sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
        __shadow_set_l1e(v, vpa, &sl1e);
    } 

    shadow_unlock(d);
    return EXCRET_fault_fixed;

fail:
    return 0;
}
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */