Framebuffer objects, rendering to textures for multiple texture units

daveb

Newcomer
Hi
I am getting great results after reading the post
http://www.beyond3d.com/forum/showthread.php?p=530048#post530048
and setting up code to work with 16-bit depths. But I have a question about using multiple texture units. Until I started using FBOs to improve jagged shadow edges (which they have done, excellent), I was using multiple texture units to handle the shadow maps for multiple lights in one render pass. With FBOs for the shadow maps, I am now struggling to figure out how to render directly into the different shadow-map textures intended for use by the multiple texture units. In the end, I got it to work by using the FBO to generate the depth map into a fixed texture object, then copying that into the texture object used by the other texture unit.

I may be confused about the semantics of texture objects in the presence of multiple texture units...???

Anyhow, here's the code with the question repeated at the bottom in more detail:

Code:
// Here's the code, #ifdef'd to show the broken and working states (the working state uses glCopyTexImage2D)

//---- during init only
glGenFramebuffersEXT(1, &m_Framebuffer);
glGenRenderbuffersEXT(1, &m_DepthRenderbuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, m_DepthRenderbuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT16, shadowsize, shadowsize);

//---- rendering shadowmaps only when scene geometry changes, not during view position changes

#ifdef BROKEN
	SetCurrentTextureUnit(n+1);			// shadow map for light 0,1,2... uses texture unit 1,2,3...
										// just calls glActiveTextureARB(GL_TEXTURE3_ARB); for n=2 etc
	GLuint tex = ShadowTextureID(n);	// previously have done glGenTextures to allocate these
#else
	GLuint tex = m_tex;					// a fixed texture number allocated during init
#endif

glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_LUMINANCE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, shadowsize, shadowsize, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, m_Framebuffer);
glDrawBuffer(GL_NONE);	// no color buffer dest
glReadBuffer(GL_NONE);	// no color buffer src
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, tex);

CheckFramebufferStatus();

glBindTexture(GL_TEXTURE_2D, 0);	// don't leave this texture bound (?unsure if this is correct but it appears to make no diff)
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);	// want depths only

DrawScene();

#ifndef BROKEN
	SetCurrentTextureUnit(n+1);		// shadow map for light 0,1,2... uses texture unit 1,2,3...
	ShadowBindTexture(n);
	glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 0, 0, shadowsize, shadowsize, 0);
#endif

glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);

//-----
// The scene is then rendered for each frame with the texture units active for shadow mapping


//-----
// ShadowBindTexture(n) is these steps:
	glBindTexture(GL_TEXTURE_2D, ShadowTextureID(n));
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY);
	if (m_bHas_GL_ARB_shadow_ambient)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FAIL_VALUE_ARB, 0.5f);
	glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
	glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
	glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
	glTexGeni(GL_Q, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);

Questions
---------

1. My main question: how do I render to a texture that is in a different texture unit from the one that is active while I am generating the shadow map?
That is, I want to get rid of the call to glCopyTexImage2D.
Things work fine (as above) when I render to a depth texture which I allocate, then use glCopyTexImage2D
to copy the generated depth texture into the other texture unit.

But if I try to render directly into the texture 'tex' and just bind it, then it doesn't work - it creates a bunch of stripes for shadows.
I may just be confused about the semantics of creating and binding/unbinding texture objects with multiple texture units.
I used the above code, just changing the fixed texture reference (m_tex) to the per-light ShadowTextureID(n), as in the #ifdef BROKEN branch.

Another question:

2. When defining the texture I'm now using
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, shadowsize, shadowsize, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
I was having all sorts of problems until I saw a post in this forum saying that ATI only supports 16-bit depths, whereas I was using 24.
Now that I have changed to 16, it works fine, but I wondered about the effect of the second-last parameter, which is now GL_UNSIGNED_INT.
In some other forum discussion I saw this used:
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, shadowsize, shadowsize, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, NULL);
Notice it is non-specific (GL_DEPTH_COMPONENT without a 16 or 24 suffix)... what does that do?
Also it uses GL_UNSIGNED_BYTE ... is that important?

= = =

Footnote: I have an ATI Radeon 9700 and am using the OmegaDrivers.net drivers (Catalyst version 5.10a, driver ver 6.14.10.6575).
I was getting crashes in the glBindFramebufferEXT call (atioglxx.dll exceptions) until I found a helpful note at
http://www.gamedev.net/community/forums/topic.asp?topic_id=336643
whereupon I turned off floating-point exceptions during these calls, and things are working now.
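
(For reference: the workaround boils down to masking floating-point exceptions around the bind - a minimal sketch, assuming MSVC's _controlfp from <float.h>:)

Code:
#include <float.h>

unsigned int oldcw = _controlfp(0, 0);          // read the current FP control word
_controlfp(_MCW_EM, _MCW_EM);                   // mask (disable) all FP exceptions
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, m_Framebuffer);
_controlfp(oldcw, _MCW_EM);                     // restore the previous exception mask bits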
 
Well, you have a lot of questions; I'll do my best to get some of them answered.

First, if you want to create multiple shadow maps, you should probably create multiple FBOs, each bound to the depth texture you intend to use. You won't need to use glCopyTexImage2D any longer.

Second, yes, the Radeon 9700 can only support 16-bit depth textures. The next-to-last parameter to glTexImage2D is irrelevant for this; it is the internal format parameter that matters.
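
(For example, a sketch reusing the allocation from your code - the sized internal format is what selects the depth precision:)

Code:
// The internal format (GL_DEPTH_COMPONENT16) decides the precision of the storage.
// The format/type parameters describe client-side data; with a NULL pointer they
// don't affect the allocated storage.
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16,
             shadowsize, shadowsize, 0,
             GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);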

-Evan
 
Thanks, but I guess I still don't follow the mechanics of rendering to a texture bound to another texture unit.

Let me ask it this way... look at the sequence:

1. Create a texture object bound to texture unit #1, set up the FBO, and bind the texture as the depth buffer
2. Render scene to FBO, during rendering we may use texture unit #0

then we are ready to render frames... FBO is now out of the picture...

3. Set up texture unit #1 as the depth texture for shadow mapping
4. Render the scene, definitely using texture unit #0 for the real textures in the scene

Before I started to use FBOs this all worked - no step 1, step 2 rendered to the normal window, then I grabbed the depth texture at the end of step 2 by setting the active texture unit to #1 and doing a glCopyTexImage.

Now, using FBOs, I can get it to work by rendering to the FBO with a texture attached as the "depth attachment", but only if I create the texture object by doing the glBindTexture while texture unit #0 is active. Then, to get the depth map across to texture unit #1, I still do a glCopyTexImage at the end of step 2 - see the #ifdefs in the code.

My attempt to do things the "right way" (i.e. the FBO rendering straight into a texture object bound to texture unit #1, with no copy - as per steps 1-4 above) is shown in the code as #ifdef BROKEN. It executes, but it doesn't end up with a sensible bitmap; I just get stripes for shadows.

This is what is confusing me... the question is: are texture objects created with some knowledge of which texture unit # they belong to, or are they truly independent of texture units? Is what I am doing in the #ifdef BROKEN code flawed in some way?
I would have thought I could make texture unit #1 active before doing the glBindTexture, then go back to texture unit #0 to do the render; the render goes to the FBO with the depth attachment, populating the depth texture bound to unit #1 and making any glCopyTexImage unnecessary. But I cannot get this to work.

Perhaps I'm missing something about the concept of binding textures to multiple texture units??
 
End of the week and tired, but it looks like you're making a conceptual error.

When you bind the FBO you don't care about textures at all; you just replace the framebuffer with another one.

When you render the scene after having rendered to the FBO (and so to its attachments, which are either render textures or renderbuffers), you can bind some of the attachments as textures, to whichever texture unit you want.
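
(For illustration - a minimal sketch of that last point, with hypothetical texture names shadowTex and sceneTex:)

Code:
// A texture object is not tied to any particular texture unit: the depth
// texture the FBO rendered into can be bound to whichever unit you like.
glActiveTextureARB(GL_TEXTURE1_ARB);        // select unit 1 for the shadow map
glBindTexture(GL_TEXTURE_2D, shadowTex);    // the FBO's depth attachment texture
glActiveTextureARB(GL_TEXTURE0_ARB);        // back to unit 0 for the scene textures
glBindTexture(GL_TEXTURE_2D, sceneTex);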

I'll check the specs, but what the hell is that RENDERBUFFER for?
AFAIR you can't use it as a texture, because it's a BUFFER (offscreen rendering only); you need to create a TEXTURE if you want to use it as both target and source (i.e. render to it, then use it as a texture).

--re reading your code then checking specs--

<edit>
BTW, are you checking OpenGL errors ?
I bet you have some ^^ (you can use glIntercept for that if you want)
 
Okay, just did a quick check: your texture should be bound to the FBO with glFramebufferTexture2DEXT.
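
(Something along these lines, reusing the 'tex' name from your snippet; glFramebufferRenderbufferEXT is only for renderbuffer attachments:)

Code:
// Attach the depth texture itself, not a renderbuffer, to the FBO's depth
// attachment point (target, attachment, textarget, texture, mip level).
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
                          GL_TEXTURE_2D, tex, 0);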

I guess you should reread the specs and check the example at the bottom of them; from memory the specs are here:
http://oss.sgi.com/projects/ogl-sample/registry/

(And yes, I know I sound like a lame Linux user telling you RTFM; it's awful, but I'm rather tired right now.)
 
OK, you may have nailed it... I am using glFramebufferRenderbufferEXT, but example (7) in the spec at
http://oss.sgi.com/projects/ogl-sample/registry/EXT/framebuffer_object.txt
uses glFramebufferTexture2DEXT. [The above link is #310 at your link, which is correct.]
I even had this highlighted in a printout I have been poring over - I think I tried it and couldn't get it to work at an earlier stage, but that may have been due to other glitches that are now resolved (e.g. DEPTH16). Will try again.

Example (7) in the spec starts with the casual words
"// Given: depth_tex - TEXTURE_2D depth texture object"...
I think the problem was that every which way I tried, I couldn't seem to get an attachment-complete framebuffer with this approach. As I understand it, glBindTexture creates a texture object and binds it in the current render context, so I was doing this and then unbinding... maybe I need to re-examine the steps used in creating the texture. Do I need to select the right texture unit # during the texture creation step? Maybe that's where it's going wrong?? Any help on the steps required to create the texture is welcome.

PS I am checking OpenGL errors with a function call which ASSERTs if there are any - during testing I added it after almost every gl call, and just chopped it out to keep the code example here clearer. Good point, though: I was getting errors initially, but the code above produces no errors, with either version of the #ifdef.

PPS I hadn't seen glIntercept before... I searched and found it at http://glintercept.nutty.org/index.html - very interesting, thanks. I also looked at http://www.gremedy.com, but that costs $ and I have only just seen it a few days ago; I don't know how these compare, but it's good to know there are some debugging tools arriving.
 
You probably want something like this (a rough sketch - it is late on a Friday):

glGenFramebuffersEXT( numShadowMaps, fbos );
glGenTextures( numShadowMaps, tex );

for ( i = 0; i < numShadowMaps; i++ ) {
    glBindTexture( GL_TEXTURE_2D, tex[i] );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, size, size, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL );
    glBindTexture( GL_TEXTURE_2D, 0 );
    glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, fbos[i] );
    glFramebufferTexture2DEXT( GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, tex[i], 0 );
}

At render time, it works like this:

for ( i = 0; i < numShadowMaps; i++ ) {
    glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, fbos[i] );
    // set matrices and render the depth pass for shadow map i
}
glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, 0 );

// now bind tex[i] to the right texture units, and render the scene.

-Evan
 
GIZZZ
Why the hell can't they keep glext.h synced with the extensions?
Yet again the latest addition is missing from the file (sure, I can add it myself, but when you do something you should do it RIGHT).

BTW, the ATI SDK (which I got a few days ago) is BIG - couldn't it be made available for browsing like on the nVidia site (not sure they are still doing that, though)?
 
I didn't want to code today, but while checking code I'm changing stuff :(

Humus, what about this:
"Use properly specified textures
Textures intended for rendering to should be properly setup at an early stage. If you plan on using mipmapping on a render target it’s best to allocate all mipmap levels at once at creation time, rather than relying on glGenerateMipmapEXT() to create them for you."

Which is better (both being the init sequence of a texture 'target')?
Code:
glBindTexture( m_eGLTarget, m_uiGLID );

for ( unsigned int i = 0; i < uiMipmapCount; ++i, uiWidth >>= 1, uiHeight >>= 1 )
{
    uiWidth = max( uiWidth, 1 );
    uiHeight = max( uiHeight, 1 );
    glTexImage2D( m_eGLTarget, 0, m_eGLInternalFormat, uiWidth, uiHeight, 0, eGLExtFormat, GL_INT, NULL );
}

OR

Code:
   glBindTexture( m_eGLTarget, m_uiGLID );

glTexImage2D( m_eGLTarget, 0, m_eGLInternalFormat, uiWidth, uiHeight, 0, eGLExtFormat, GL_INT, NULL );
glGenerateMipmapEXT( m_eGLTarget );
 
The first one has a bug: you have to use i instead of 0 for the level parameter of glTexImage2D.

The first one is better because the second one will waste time generating mipmaps from meaningless data. The paragraph you quoted is about allocation, though, and both variants do early allocation as opposed to allocating just the base level using glGenerateMipmapEXT() only after rendering to the target.
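
(In other words, a sketch of the corrected first variant, with the level parameter following the loop index:)

Code:
for ( unsigned int i = 0; i < uiMipmapCount; ++i, uiWidth >>= 1, uiHeight >>= 1 )
{
    uiWidth = max( uiWidth, 1 );
    uiHeight = max( uiHeight, 1 );
    // allocate mip level i up front so the render target is fully specified
    glTexImage2D( m_eGLTarget, i, m_eGLInternalFormat, uiWidth, uiHeight, 0, eGLExtFormat, GL_INT, NULL );
}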
 
Xmas said:
The first one has a bug, you have to use i instead of 0 for the level parameter of glTexImage2D.
Indeed, corrected.

Xmas said:
The first one is better because the second one will waste time generating mipmaps from meaningless data. The paragraph you quoted is about allocation, though, and both variants do early allocation as opposed to allocating just the base level using glGenerateMipmapEXT() only after rendering to the target.
OK, so my approach was correct anyway, but changing glGenerateMipmapEXT() to a loop for render-texture creation is better.

Changes committed to CVS, then.
 
All working now!!!

OK, pretty much as per the sketch suggested by ehart, and also the ATI SDK article by Emil Persson that Humus pointed me to. I was still getting framebuffer-attachment-incomplete messages until I also added the glTexParameteri lines between the bind and unbind of the texture, before trying to bind the FBO. But now it all works - thanks for all the suggestions! Here's how the code looks now:

Code:
// During init only

glGetIntegerv(GL_MAX_RENDERBUFFER_SIZE_EXT, &maxrenderbuffersize);
// then check shadowmapsize <= maxrenderbuffersize

glGenFramebuffersEXT(1, &m_Framebuffer);
m_pShadowTextureIDs = new GLuint[m_nShadowTextureIDs];
glGenTextures(m_nShadowTextureIDs,m_pShadowTextureIDs);

ASSERT(::glGetError() == GL_NO_ERROR);


// Shadowmap creation for each light [n] using fbo

GLuint tex = m_pShadowTextureIDs[n];
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_LUMINANCE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, shadowsize, shadowsize, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
glBindTexture(GL_TEXTURE_2D, 0);	// don't leave this texture bound as a texturing source; we only want it used as the FBO's GL_DEPTH_ATTACHMENT_EXT destination

QFPExceptionsSimple tfpe(false);	// bug in driver causes exceptions during bind, see http://www.gamedev.net/community/forums/topic.asp?topic_id=336643
					// this little class turns off FPExceptions in constructor, restores FP state in destructor
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, m_Framebuffer);

// Explicitly tell fbo there will not be any drawbuffer (both for output and input), not using GL_COLOR_ATTACHMENT0_EXT
glDrawBuffer(GL_NONE);	// no color buffer dest
glReadBuffer(GL_NONE);	// no color buffer src
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, tex, 0);

ASSERT(::glGetError() == GL_NO_ERROR);
CheckFramebufferStatus();

// Draw depthmap to fbo
glPolygonOffset(polyoffsetFactor, polyoffsetUnits);
DrawSceneFromLightnViewpoint();

glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);


// For each frame, can now bind m_pShadowTextureIDs to desired texture unit and render in one pass

for-all-lights{
  SetCurrentTextureUnit(n+1);		// shadow map for light 0,1,2... uses texture unit 1,2,3...
  ShadowBindTexture(n);			// glActiveTextureARB, glEnable(GL_TEXTURE_2D) etc
}
SetCurrentTextureUnit(0);		// uses texture unit 0 for 'real' scene textures
DrawScene();
 