Convert floating-point numbers to decimal digits in GLSL
+1 for an interesting problem. I was curious, so I tried to code this. I need arrays, so I chose #version 420 core. My app renders a single quad covering the whole screen with coordinates <-1,+1>. I am using a full 8x8-pixel ASCII font texture (32x8 characters) that I created some years ago.
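For reference, the covering quad can be fed like this (a minimal sketch using client-side vertex arrays; under a strict core profile you would put the same data into a VBO/VAO instead):

// CPU side: fullscreen quad <-1,+1> routed to layout(location=0)
const GLfloat quad[16]=
    {
    -1.0,-1.0,0.0,1.0,
    +1.0,-1.0,0.0,1.0,
    +1.0,+1.0,0.0,1.0,
    -1.0,+1.0,0.0,1.0,
    };
glEnableVertexAttribArray(0);
glVertexAttribPointer(0,4,GL_FLOAT,GL_FALSE,0,quad);
glDrawArrays(GL_TRIANGLE_FAN,0,4);
glDisableVertexAttribArray(0);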
The vertex shader is simple:
//---------------------------------------------------------------------------
// Vertex
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
layout(location=0) in vec4 vertex;
out vec2 pos; // screen position <-1,+1>
void main()
    {
    pos=vertex.xy;
    gl_Position=vertex;
    }
//---------------------------------------------------------------------------
The fragment shader is a bit more complicated:
//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
in vec2 pos; // screen position <-1,+1>
out vec4 frag_col; // fragment output color (gl_FragColor cannot be redeclared in the core profile)
uniform sampler2D txr_font; // ASCII 32x8 characters font texture unit
uniform float fxs,fys; // font/screen resolution ratio
//---------------------------------------------------------------------------
const int _txtsiz=32; // text buffer size
int txt[_txtsiz],txtsiz; // text buffer and its actual size
vec4 col; // color interface for txt_print()
//---------------------------------------------------------------------------
void txt_decimal(float x)       // print float x into txt
    {
    int i,j,c;                  // i,j ... start/end of integer digits, c ... swap temp
    float y,a;
    const float base=10.0;
    // handle sign
    if (x<0.0) { txt[txtsiz]='-'; txtsiz++; x=-x; }
     else      { txt[txtsiz]='+'; txtsiz++; }
    // split number into integer part (x) and fractional part (y)
    y=x; x=floor(x); y-=x;
    // handle integer part
    i=txtsiz;                   // start of integer part
    for (;txtsiz<_txtsiz;)
        {
        a=x;
        x=floor(x/base);
        a-=base*x;              // a is the least significant digit of x
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (x<=0.0) break;
        }
    j=txtsiz-1;                 // end of integer part
    for (;i<j;i++,j--)          // reverse integer digits (they were emitted backwards)
        {
        c=txt[i]; txt[i]=txt[j]; txt[j]=c;
        }
    // handle fractional part
    for (txt[txtsiz]='.',txtsiz++;txtsiz<_txtsiz;)
        {
        y*=base;
        a=floor(y);
        y-=a;
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (y<=0.0) break;
        }
    txt[txtsiz]=0;              // string terminator
    }
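// e.g. txt_decimal(12.345) appends "+12.345" to txt; the trailing fractional
// digits may come out as "3449..." due to 32-bit float rounding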
//---------------------------------------------------------------------------
void txt_print(float x0,float y0)   // print txt at x0,y0 [chars]
    {
    int i;
    float x,y;
    // fragment position [chars] relative to x0,y0
    x=0.5*(1.0+pos.x)/fxs; x-=x0;
    y=0.5*(1.0-pos.y)/fys; y-=y0;
    // inside bbox?
    if ((x<0.0)||(x>float(txtsiz))||(y<0.0)||(y>1.0)) return;
    // get font texture position for target ASCII
    i=int(x);                       // char index in txt
    x-=float(i);
    i=txt[i];
    x+=float(int(i&31));            // i%32 ... column in the 32x8 character font
    y+=float(int(i>>5));            // i/32 ... row in the 32x8 character font
    x/=32.0; y/=8.0;                // offset in char texture
    col=texture(txr_font,vec2(x,y));// texture2D() is not available in the core profile
    }
//---------------------------------------------------------------------------
void main()
    {
    col=vec4(0.0,1.0,0.0,1.0);      // background color
    txtsiz=0;
    txt[txtsiz]='F'; txtsiz++;
    txt[txtsiz]='l'; txtsiz++;
    txt[txtsiz]='o'; txtsiz++;
    txt[txtsiz]='a'; txtsiz++;
    txt[txtsiz]='t'; txtsiz++;
    txt[txtsiz]=':'; txtsiz++;
    txt[txtsiz]=' '; txtsiz++;
    txt_decimal(12.345);
    txt_print(1.0,1.0);
    frag_col=col;
    }
//---------------------------------------------------------------------------
Here are my CPU-side uniforms:
glUniform1i(glGetUniformLocation(prog_id,"txr_font"),0);
glUniform1f(glGetUniformLocation(prog_id,"fxs"),(8.0)/float(xs));
glUniform1f(glGetUniformLocation(prog_id,"fys"),(8.0)/float(ys));
where xs,ys is my screen resolution. The font is 8x8 pixels per character and is bound to texture unit 0.
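The font texture itself is bound in the usual way (a minimal sketch; txr_font_id is a hypothetical handle to the loaded 256x64 pixel font texture):

glActiveTexture(GL_TEXTURE0);               // unit 0, matching the txr_font sampler
glBindTexture(GL_TEXTURE_2D,txr_font_id);   // hypothetical texture object holding the 32x8 character font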
Here is the output of the test fragment code:
If your floating-point accuracy is reduced by the HW implementation, then you should consider printing in hex, where no accuracy is lost (the bits are accessed directly). That can later be converted to decimal using integer math (sketched below) ...
see:
- string hex2dec conversion on integer math
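For example, a minimal sketch of such a hex printout, reusing the txt buffer from the listing above (txt_hexa is a name I made up; floatBitsToUint is available in #version 420 core, and the character constants rely on the same compiler tolerance as the listing above):

void txt_hexa(float x)              // print raw bits of x into txt as 8 hex digits
    {
    uint n=floatBitsToUint(x);      // exact binary image of the float, no rounding
    for (int i=28;i>=0;i-=4)        // nibbles from most to least significant
        {
        int d=int((n>>uint(i))&15u);
        txt[txtsiz]=(d<10)?(d+'0'):(d-10+'A'); txtsiz++;
        }
    }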
[Edit2] old style GLSL shaders
I tried to port this to old-style GLSL and suddenly it works (before, it would not compile with arrays present, but when I think about it, I was trying char[] back then, which was the real reason).
//---------------------------------------------------------------------------
// Vertex
//---------------------------------------------------------------------------
varying vec2 pos; // screen position <-1,+1>
void main()
    {
    pos=gl_Vertex.xy;
    gl_Position=gl_Vertex;
    }
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
varying vec2 pos; // screen position <-1,+1>
uniform sampler2D txr_font; // ASCII 32x8 characters font texture unit
uniform float fxs,fys; // font/screen resolution ratio
//---------------------------------------------------------------------------
const int _txtsiz=32; // text buffer size
int txt[_txtsiz],txtsiz; // text buffer and its actual size
vec4 col; // color interface for txt_print()
//---------------------------------------------------------------------------
void txt_decimal(float x)       // print float x into txt
    {
    int i,j,c;                  // i,j ... start/end of integer digits, c ... swap temp
    float y,a;
    const float base=10.0;
    // handle sign
    if (x<0.0) { txt[txtsiz]='-'; txtsiz++; x=-x; }
     else      { txt[txtsiz]='+'; txtsiz++; }
    // split number into integer part (x) and fractional part (y)
    y=x; x=floor(x); y-=x;
    // handle integer part
    i=txtsiz;                   // start of integer part
    for (;txtsiz<_txtsiz;)
        {
        a=x;
        x=floor(x/base);
        a-=base*x;              // a is the least significant digit of x
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (x<=0.0) break;
        }
    j=txtsiz-1;                 // end of integer part
    for (;i<j;i++,j--)          // reverse integer digits (they were emitted backwards)
        {
        c=txt[i]; txt[i]=txt[j]; txt[j]=c;
        }
    // handle fractional part
    for (txt[txtsiz]='.',txtsiz++;txtsiz<_txtsiz;)
        {
        y*=base;
        a=floor(y);
        y-=a;
        txt[txtsiz]=int(a)+'0'; txtsiz++;
        if (y<=0.0) break;
        }
    txt[txtsiz]=0;              // string terminator
    }
//---------------------------------------------------------------------------
void txt_print(float x0,float y0)   // print txt at x0,y0 [chars]
    {
    int i;
    float x,y;
    // fragment position [chars] relative to x0,y0
    x=0.5*(1.0+pos.x)/fxs; x-=x0;
    y=0.5*(1.0-pos.y)/fys; y-=y0;
    // inside bbox?
    if ((x<0.0)||(x>float(txtsiz))||(y<0.0)||(y>1.0)) return;
    // get font texture position for target ASCII
    i=int(x);                       // char index in txt
    x-=float(i);
    i=txt[i];
    x+=float(int(i-((i/32)*32)));   // i%32 ... column (old GLSL lacks the % operator)
    y+=float(int(i/32));            // i/32 ... row
    x/=32.0; y/=8.0;                // offset in char texture
    col=texture2D(txr_font,vec2(x,y));
    }
//---------------------------------------------------------------------------
void main()
    {
    col=vec4(0.0,1.0,0.0,1.0);      // background color
    txtsiz=0;
    txt[txtsiz]='F'; txtsiz++;
    txt[txtsiz]='l'; txtsiz++;
    txt[txtsiz]='o'; txtsiz++;
    txt[txtsiz]='a'; txtsiz++;
    txt[txtsiz]='t'; txtsiz++;
    txt[txtsiz]=':'; txtsiz++;
    txt[txtsiz]=' '; txtsiz++;
    txt_decimal(12.345);
    txt_print(1.0,1.0);
    gl_FragColor=col;
    }
//---------------------------------------------------------------------------
First of all, I want to mention that Spektre's solution is almost perfect and, beyond that, a general solution for text output. I gave his answer an upvote. As an alternative, I present a minimally invasive solution that improves the code from the question.
I will not conceal that I studied Spektre's solution and integrated parts of it into mine.
varying vec2 vFragCoordinate; // window-space fragment coordinate (from the question's shader)
uniform vec2 uTextureSize; // size of the font texture in pixels
uniform sampler2D uTextureSlotNumber; // texture unit holding the digit atlas

// Assume that the texture to which uTextureSlotNumber refers contains
// a rendering of the digits '0123456789' packed together, such that
const vec2 startOfDigitsInTexture = vec2( 100, 125 ); // the lower-left corner of the first digit starts here and
const vec2 sizeOfDigit = vec2( 100, 125 ); // each digit spans this many pixels
const float nSpaces = 10.0; // assume we have this many digits' worth of space to render in
void RenderDigit( int strPos, int digit, vec2 pos )
{
    float testStrPos = pos.x / sizeOfDigit.x;
    if ( testStrPos >= float(strPos) && testStrPos < float(strPos+1) )
    {
        float start = sizeOfDigit.x * float(digit);
        vec2 textureSourcePosition = vec2( startOfDigitsInTexture.x + start + mod( pos.x, sizeOfDigit.x ), startOfDigitsInTexture.y + pos.y );
        gl_FragColor = texture2D( uTextureSlotNumber, textureSourcePosition / uTextureSize );
    }
}
The function ValueToDigits interprets a floating-point number and fills an array with its digits. Each number in the array is in the range [0, 9].
const int MAX_DIGITS = 32;
int digits[MAX_DIGITS];
int noOfDigits = 0;
int posOfComma = 0;
void Reverse( int start, int end )
{
    for ( ; start < end; ++ start, -- end )
    {
        int digit = digits[start];
        digits[start] = digits[end];
        digits[end] = digit;
    }
}

void ValueToDigits( float value )
{
    const float base = 10.0;
    int start = noOfDigits;

    value = abs( value );
    float frac = value; value = floor(value); frac -= value;

    // integral digits (emitted least significant first)
    for ( ; value > 0.0 && noOfDigits < MAX_DIGITS; ++ noOfDigits )
    {
        float newValue = floor( value / base );
        digits[noOfDigits] = int( value - base * newValue );
        value = newValue;
    }
    // a pure fraction such as 0.5 still gets a leading '0'
    if ( noOfDigits == start && noOfDigits < MAX_DIGITS )
    {
        digits[noOfDigits] = 0; ++ noOfDigits;
    }
    Reverse( start, noOfDigits-1 );

    posOfComma = noOfDigits;

    // fractional digits (most significant first)
    for ( ; frac > 0.0 && noOfDigits < MAX_DIGITS; ++ noOfDigits )
    {
        frac *= base;
        float digit = floor( frac );
        frac -= digit;
        digits[noOfDigits] = int( digit );
    }
}
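For example (ignoring floating-point rounding), ValueToDigits( 12.345 ) leaves digits = {1, 2, 3, 4, 5} with noOfDigits = 5 and posOfComma = 2, so the decimal point is drawn between the second and third character cells.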
Call ValueToDigits in your original function, then find the digit and texture coordinates for the current fragment.
void RenderDecimal( float value )
{
    // fill the array of digits with the floating point value
    ValueToDigits( value );

    // Render the digits
    vec2 pos = vFragCoordinate.xy - startOfDigitsInTexture;
    if ( pos.x >= 0.0 && pos.x < sizeOfDigit.x * nSpaces && pos.y >= 0.0 && pos.y < sizeOfDigit.y )
    {
        // render the digits
        for ( int strPos = 0; strPos < noOfDigits; ++ strPos )
            RenderDigit( strPos, digits[strPos], pos );
    }

    // Render the decimal point
    float testStrPos = pos.x / sizeOfDigit.x;
    float remainder = mod( pos.x, sizeOfDigit.x );
    if ( ( testStrPos >= float(posOfComma) && testStrPos < float(posOfComma+1) && remainder / sizeOfDigit.x < 0.1 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) ||
         ( testStrPos >= float(posOfComma-1) && testStrPos < float(posOfComma) && remainder / sizeOfDigit.x > 0.9 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) )
    {
        gl_FragColor = texture2D( uTextureSlotNumber, ( startOfDigitsInTexture + sizeOfDigit * vec2( 1.5, 0.5 ) ) / uTextureSize );
    }
}
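A minimal sketch of a main that drives it (assuming the declarations above; 12.345 is just a test value):

void main(void)
{
    gl_FragColor = texture2D( uTextureSlotNumber, vFragCoordinate / uTextureSize ); // background
    RenderDecimal( 12.345 ); // hypothetical test value, overdrawn where digits apply
}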
Here's my updated fragment shader, which can be dropped into the listing in my original question. It implements the decimal-digit-finding algorithm Spektre proposed, in a way that is even compatible with the legacy GLSL 1.20 dialect I'm using. Without that constraint, Spektre's solution is, of course, much more elegant and powerful.
varying vec2 vFragCoordinate;
uniform vec2 uTextureSize;
uniform sampler2D uTextureSlotNumber;
float Digit( float x, int position, float base )
{
    int i;
    float digit = 0.0;
    if ( position < 0 )
    {
        // fractional digits: shift left one decimal place at a time
        x = fract( x );
        for ( i = -1; i >= position; i-- )
        {
            if ( x <= 0.0 ) { digit = 0.0; break; }
            x *= base;
            digit = floor( x );
            x -= digit;
        }
    }
    else
    {
        // integral digits: strip one decimal place at a time
        x = floor( x );
        float prevx;
        for ( i = 0; i <= position; i++ )
        {
            if ( x <= 0.0 ) { digit = 0.0; break; }
            prevx = x;
            x = floor( x / base );
            digit = prevx - base * x;
        }
    }
    return digit;
}
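// Worked example (assuming value = 12.345):
//   Digit( value,  1, 10.0 ) == 1.0   (tens)
//   Digit( value,  0, 10.0 ) == 2.0   (units)
//   Digit( value, -1, 10.0 ) == 3.0   (tenths)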
float OrderOfMagnitude( float x )
{
    return x == 0.0 ? 0.0 : floor( log( abs( x ) ) / log( 10.0 ) );
}
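// e.g. OrderOfMagnitude( 12.345 ) == 1.0 and OrderOfMagnitude( 0.034 ) == -2.0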
void RenderDecimal( float value )
{
    // Assume that the texture to which uTextureSlotNumber refers contains
    // a rendering of the digits '0123456789' packed together, such that
    const vec2 startOfDigitsInTexture = vec2( 0, 0 );   // the lower-left corner of the first digit starts here and
    const vec2 sizeOfDigit = vec2( 100, 125 );          // each digit spans this many pixels
    const float nSpaces = 10.0;                         // assume we have this many digits' worth of space to render in

    value = abs( value );
    vec2 pos = vFragCoordinate - startOfDigitsInTexture;
    float dpstart = max( 0.0, OrderOfMagnitude( value ) );
    int decimal_position = int( dpstart - floor( pos.x / sizeOfDigit.x ) );
    float remainder = mod( pos.x, sizeOfDigit.x );

    if ( pos.x >= 0.0 && pos.x < sizeOfDigit.x * nSpaces && pos.y >= 0.0 && pos.y < sizeOfDigit.y )
    {
        float digit_value = Digit( value, decimal_position, 10.0 );
        vec2 textureSourcePosition = vec2( startOfDigitsInTexture.x + remainder + digit_value * sizeOfDigit.x, startOfDigitsInTexture.y + pos.y );
        gl_FragColor = texture2D( uTextureSlotNumber, textureSourcePosition / uTextureSize );
    }

    // Render the decimal point
    if ( ( decimal_position == -1 && remainder / sizeOfDigit.x < 0.1 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) ||
         ( decimal_position ==  0 && remainder / sizeOfDigit.x > 0.9 && abs( pos.y ) / sizeOfDigit.y < 0.1 ) )
    {
        gl_FragColor = texture2D( uTextureSlotNumber, ( startOfDigitsInTexture + sizeOfDigit * vec2( 1.5, 0.5 ) ) / uTextureSize );
    }
}
void main(void)
{
    gl_FragColor = texture2D( uTextureSlotNumber, vFragCoordinate / uTextureSize );
    RenderDecimal( 2.5 ); // for current demonstration purposes, just a constant
}