OpenGL 3+ failure on Intel/AMD (C++)

Gnampf

I am working on an updated renderer for an old game engine and I have a very frustrating visual problem on Intel and AMD cards. I found tons of pages about Intel and OpenGL trouble, but I would at least expect it to work on AMD; on NVidia cards, no matter what generation, it seems to work flawlessly. I am stripping some code out of the project, so please tell me if something is missing. At first I assumed the problem came from old fixed-pipeline remnants, but after cleaning those up it still remains. Here is a shot of what it looks like: OpenGL fail on AMD/Intel

I have worked through the debuggers and debugging methods listed at https://www.opengl.org/wiki/Debugging_Tools : classic glGetError checks line by line, AMD's CodeXL, and GPUProfiler. Some of these tools are rather complicated and I am not familiar with all of them, but none of them shows a warning or an error.
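The CHECK_GL_ERROR() used in the drawing code further down is basically a glGetError wrapper along these lines (simplified sketch; the real macro presumably logs through the engine's own functions):

// Simplified sketch of a line-by-line glGetError check; the actual
// CHECK_GL_ERROR() macro in the project may report errors differently.
#include <cstdio>

#define CHECK_GL_ERROR() \
    do { \
        for (GLenum Err = glGetError(); Err != GL_NO_ERROR; Err = glGetError()) \
            fprintf(stderr, "OpenGL error 0x%04x at %s:%d\n", Err, __FILE__, __LINE__); \
    } while (0)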

To put some information in: context creation currently looks like this:

PIXELFORMATDESCRIPTOR pfd =
{
    sizeof(PIXELFORMATDESCRIPTOR),
    1,
    PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
    PFD_TYPE_RGBA,
    DesiredColorBits,
    0, 0, 0, 0, 0, 0,
    0, 0,
    0, 0, 0, 0, 0,
    DesiredDepthBits,
    DesiredStencilBits,
    0,
    PFD_MAIN_PLANE,
    0,
    0, 0, 0
};

INT nPixelFormat = ChoosePixelFormat( hDC, &pfd );
check(nPixelFormat);
verify(SetPixelFormat( hDC, nPixelFormat, &pfd ));

// oldstyle context to init glew.
HGLRC tempContext = wglCreateContext(hDC);
wglMakeCurrent(hDC, tempContext);

//init glew
glewExperimental = GL_TRUE;
GLenum err = glewInit();
if (GLEW_OK != err)
    appErrorf(TEXT("Error: Init glew failed: %s"), appFromAnsi((char*)glewGetErrorString(err)));
else debugf(NAME_Init, TEXT("Glew successfully initialized."));

//Now init pure OpenGL >= 3.3 context.
if (WGLEW_ARB_create_context && WGLEW_ARB_pixel_format)
{
    wglMakeCurrent(NULL, NULL);
    wglDeleteContext(tempContext);

    pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
    pfd.nVersion = 1;
    pfd.dwFlags = PFD_DOUBLEBUFFER | PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW;
    pfd.iPixelType = PFD_TYPE_RGBA;
    pfd.cColorBits = DesiredColorBits;
    pfd.cDepthBits = DesiredDepthBits;
    pfd.iLayerType = PFD_MAIN_PLANE;

    const INT iPixelFormatAttribList[] =
    {
        WGL_DRAW_TO_WINDOW_ARB, GL_TRUE,
        WGL_SUPPORT_OPENGL_ARB, GL_TRUE,
        WGL_DOUBLE_BUFFER_ARB, GL_TRUE,
        WGL_PIXEL_TYPE_ARB, WGL_TYPE_RGBA_ARB,
        WGL_COLOR_BITS_ARB, DesiredColorBits,
        WGL_DEPTH_BITS_ARB, DesiredDepthBits,
        WGL_STENCIL_BITS_ARB, DesiredStencilBits,
        0 // End of attributes list
    };

    INT ContextFlags=0;
    if (UseOpenGLDebug)
         ContextFlags = WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB | WGL_CONTEXT_DEBUG_BIT_ARB;
    else ContextFlags = WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB;

    INT iContextAttribs[] =
    {
        WGL_CONTEXT_MAJOR_VERSION_ARB, MajorVersion,
        WGL_CONTEXT_MINOR_VERSION_ARB, MinorVersion,
        WGL_CONTEXT_FLAGS_ARB, ContextFlags,
        0 // End of attributes list
    };

    INT iPixelFormat, iNumFormats;
    wglChoosePixelFormatARB(hDC, iPixelFormatAttribList, NULL, 1, &iPixelFormat, (UINT*)&iNumFormats);

    // pfd oldstyle crap...
    debugf(NAME_Init, TEXT("DesiredColorBits: %i"), DesiredColorBits);
    debugf(NAME_Init, TEXT("DesiredDepthBits: %i"), DesiredDepthBits);
    debugf(NAME_Init, TEXT("DesiredStencilBits: %i"), DesiredStencilBits);
    debugf(NAME_Init, TEXT("PixelFormat: %i"), iPixelFormat);
    if (!SetPixelFormat(hDC, iPixelFormat, &pfd))
    {
        appErrorf(TEXT("Error: SetPixelFormat failed."));
        return;
    }

    hRC = wglCreateContextAttribsARB(hDC, 0, iContextAttribs);
}
else appErrorf(TEXT("Error: WGL_ARB_create_context or WGL_ARB_pixel_format not supported."));

if(hRC)
{
    MakeCurrent();

    debugf(NAME_Init, TEXT("GL_VENDOR     : %s"), appFromAnsi((const ANSICHAR *)glGetString(GL_VENDOR)));
    debugf(NAME_Init, TEXT("GL_RENDERER   : %s"), appFromAnsi((const ANSICHAR *)glGetString(GL_RENDERER)));
    debugf(NAME_Init, TEXT("GL_VERSION    : %s"), appFromAnsi((const ANSICHAR *)glGetString(GL_VERSION)));
    debugf(NAME_Init, TEXT("GLEW Version  : %s"), appFromAnsi((const ANSICHAR *)glewGetString(GLEW_VERSION)));

    int NumberOfExtensions=0;
    glGetIntegerv(GL_NUM_EXTENSIONS, &NumberOfExtensions);
    for (INT i = 0; i<NumberOfExtensions; i++)
    {
        FString ExtensionString = appFromAnsi((const ANSICHAR *)glGetStringi(GL_EXTENSIONS, i));
        debugf(NAME_DevLoad, TEXT("GL_EXTENSIONS(%i) : %s"), i, ExtensionString);
    }
    debugf(NAME_Init, TEXT("OpenGL %i.%i context initialized!"), MajorVersion,MinorVersion);
}
else
    appErrorf(TEXT("Error: No OpenGL %i.%i context support."), MajorVersion, MinorVersion);

if( ShareLists && AllContexts.Num() )
    verify(wglShareLists(AllContexts(0),hRC)==1);
AllContexts.AddItem(hRC);

if (UseOpenGLDebug)
{

    glDebugMessageCallbackARB(&UXOpenGLRenderDevice::DebugCallback, NULL);
    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
    GWarn->Logf(TEXT("OpenGL debugging enabled, this can cause severe performance drain!"));
}
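
The DebugCallback registered above is not shown here; it is a static member with the standard ARB_debug_output signature, roughly like this (simplified sketch, the real one may log more of the fields):

// Sketch of the debug callback registered with glDebugMessageCallbackARB above.
// The signature is fixed by GLDEBUGPROCARB; the body shown here only logs the
// message text and is an illustrative placeholder.
void APIENTRY UXOpenGLRenderDevice::DebugCallback(GLenum Source, GLenum Type, GLuint Id,
    GLenum Severity, GLsizei Length, const GLchar* Message, const void* UserParam)
{
    debugf(NAME_Init, TEXT("GL DEBUG: %s"), appFromAnsi(Message));
}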

I tried a couple of approaches and ended up with glew. However, I also tried simpler versions that only initialize glew, as well as glad and SDL2 for creating the context. The result was always the same.
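
For reference, the SDL2 variant requested an equivalent core context roughly like this (minimal sketch, window setup and error handling trimmed):

// Minimal sketch of an equivalent SDL2 context request; the actual test code
// differed in details, but the requested attributes were the same.
SDL_Init(SDL_INIT_VIDEO);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, MajorVersion);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, MinorVersion);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, DesiredDepthBits);
SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, DesiredStencilBits);

SDL_Window* Window = SDL_CreateWindow("Renderer test", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                                      800, 600, SDL_WINDOW_OPENGL);
SDL_GLContext Context = SDL_GL_CreateContext(Window);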

The matrices are set up like this:

viewMat = glm::scale(glm::mat4(1.0f), glm::vec3(1.0f, -1.0f, -1.0f));
modelMat = glm::mat4(1.0f);
projMat = glm::frustum(-RProjZ*zNear, +RProjZ*zNear, -Aspect*RProjZ*zNear, +Aspect*RProjZ*zNear, 1.0f*zNear, zFar);
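
The combined matrix is uploaded into the GlobalMatrices uniform block used by the shaders below, roughly like this (sketch; GlobalMatricesUBO and binding point 0 are illustrative names, the real code may differ):

// Sketch of feeding the combined matrix to the GlobalMatrices UBO used by the
// shaders below. GlobalMatricesUBO and binding point 0 are illustrative.
// Requires <glm/gtc/type_ptr.hpp> for glm::value_ptr.
glm::mat4 modelviewprojMat = projMat * viewMat * modelMat;

glBindBuffer(GL_UNIFORM_BUFFER, GlobalMatricesUBO);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(glm::mat4), glm::value_ptr(modelviewprojMat));
glBindBuffer(GL_UNIFORM_BUFFER, 0);

// Done once at startup to tie the block to binding point 0:
// glUniformBlockBinding(ShaderProgram, glGetUniformBlockIndex(ShaderProgram, "GlobalMatrices"), 0);
// glBindBufferBase(GL_UNIFORM_BUFFER, 0, GlobalMatricesUBO);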

Here is one of the drawing routines. There are more complex functions, but the problem already happens with this very simple one:

VertexShader

#version 330
layout (location = 0) in vec3 v_coord;      // == gl_Vertex
layout (location = 2) in vec2 TexCoords;

layout(std140) uniform GlobalMatrices
{
    mat4 modelviewprojMat;
};

uniform vec4 DrawColor;
out vec4 vDrawColor;
out vec2 vTexCoords;

void main(void)
{
    vTexCoords=TexCoords;
    vDrawColor=DrawColor;

    gl_Position = modelviewprojMat * vec4(v_coord, 1.0);
}

FragmentShader

#version 330
uniform sampler2D Texture0;
uniform float AlphaThreshold;

in vec4 vDrawColor;
in vec2 vTexCoords;

out vec4 FragColor;

void main(void)
{
    vec4 Color = texture(Texture0, vTexCoords); 

    if(Color.a <= AlphaThreshold) 
        discard;

    FragColor = Color*vDrawColor; 
}

Here I also tried various shader versions, from 150 to 430, core and compatibility profiles, and even a quick jump to OpenGL ES.

Here is the corresponding C++ function:

DrawTileVerts(FLOAT* verts, UINT size, FLOAT* tex, UINT texsize, FPlane Color)
{
    CHECK_GL_ERROR();
    glBindVertexArray(DrawTileVertsVao);
    // Verts
    glBindBuffer(GL_ARRAY_BUFFER, DrawTileVertBuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(float) * size, verts, GL_DYNAMIC_DRAW);
    glEnableVertexAttribArray(VERTEX_COORD_ATTRIB);
    glVertexAttribPointer(VERTEX_COORD_ATTRIB, 3, GL_FLOAT, GL_FALSE, sizeof(float) * FloatsPerVertex, 0);

    // Textures
    glBindBuffer(GL_ARRAY_BUFFER, DrawTileTexBuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(float) * texsize, tex, GL_DYNAMIC_DRAW);
    glEnableVertexAttribArray(TEXTURE_COORD_ATTRIB);
    glVertexAttribPointer(TEXTURE_COORD_ATTRIB, 2, GL_FLOAT, GL_FALSE, sizeof(float) * TexCoords2D, 0);

    glUniform4f(DrawTile_DrawColor, Color.X, Color.Y, Color.Z, Color.W);

    glDrawArrays(GL_TRIANGLE_FAN, 0, size);

    // Clean up
    glDisableVertexAttribArray(VERTEX_COORD_ATTRIB);
    glDisableVertexAttribArray(TEXTURE_COORD_ATTRIB);
    CHECK_GL_ERROR();
    glBindVertexArray(0);
}

I am more than grateful for any hint about what else I can try or how to get more information. If you need more code, let me know, as some parts are still missing here. I left out texturing for now, but I am quite sure it is not related. The problem also happens without the AlphaThreshold in the shaders; I added that later.

Columbo

In your DrawTileVerts, you're passing in a parameter called 'size'.

From the way you're passing it into your call to glDrawArrays, it looks like it ought to be the number of vertices.

But from the way you're using it to calculate the size to pass into glBufferData, you're treating it as the number of floats.

I think your call to glBufferData ought to be

glBufferData(GL_ARRAY_BUFFER, sizeof(float) * size * FloatsPerVertex, verts, GL_DYNAMIC_DRAW);

Or maybe your call to glDrawArrays ought to be:

glDrawArrays(GL_TRIANGLE_FAN, 0, size / FloatsPerVertex);

There's enough info to see that there's an inconsistency, but not enough info to work out which would be the correct fix.

I would also double check the way you're using texsize, because that may be wrong too, but without seeing more code, I can't tell.
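
If size is indeed meant to be the vertex count, a consistent version of the function would look something like this (a sketch only; NumVerts is an illustrative parameter name, and texsize goes away because the texcoord buffer size can be derived from the same count):

void DrawTileVerts(FLOAT* verts, UINT NumVerts, FLOAT* tex, FPlane Color)
{
    glBindVertexArray(DrawTileVertsVao);

    // Vertex positions: NumVerts vertices * FloatsPerVertex floats each.
    glBindBuffer(GL_ARRAY_BUFFER, DrawTileVertBuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(float) * NumVerts * FloatsPerVertex, verts, GL_DYNAMIC_DRAW);
    glEnableVertexAttribArray(VERTEX_COORD_ATTRIB);
    glVertexAttribPointer(VERTEX_COORD_ATTRIB, 3, GL_FLOAT, GL_FALSE, sizeof(float) * FloatsPerVertex, 0);

    // Texture coordinates: NumVerts vertices * TexCoords2D floats each.
    glBindBuffer(GL_ARRAY_BUFFER, DrawTileTexBuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(float) * NumVerts * TexCoords2D, tex, GL_DYNAMIC_DRAW);
    glEnableVertexAttribArray(TEXTURE_COORD_ATTRIB);
    glVertexAttribPointer(TEXTURE_COORD_ATTRIB, 2, GL_FLOAT, GL_FALSE, sizeof(float) * TexCoords2D, 0);

    glUniform4f(DrawTile_DrawColor, Color.X, Color.Y, Color.Z, Color.W);

    // The draw count and the buffer sizes now agree on what NumVerts means.
    glDrawArrays(GL_TRIANGLE_FAN, 0, NumVerts);

    glDisableVertexAttribArray(VERTEX_COORD_ATTRIB);
    glDisableVertexAttribArray(TEXTURE_COORD_ATTRIB);
    glBindVertexArray(0);
}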
