How feasible are these image formats?

Hello,

I am working on a new game library for my TFT displays and I want to make a sprite table. Right now I have made about 16 sprites, all 16-bit (I made them literally using pencil and paper and writing down the HEX values, don’t judge me)

They are currently only monochrome sprites, and you change the color by setting the foreground color. (I'm using the UTFT library, so if you have used it then you would know how colors are set) Anyways… I wanted to make the sprites have some color, and I knew beforehand that making each pixel a different color individually was going to take up too much memory, so I tried some things and this is what I came to.

The code below uses 2 different image formats.
Both are nearly identical in speed, but the second format is actually faster.
What I want to know is how feasible are these formats in terms of what is already being used. And are there any better alternatives that will work on an Arduino and are faster or perhaps take up less memory.

For those interested to know, these formats are comprised of simply the color and how many of that color are next to each other. IE. If 5 pixels are Blue and they are all next to each other, then the number represents 5 of “said color” are next to one another and so draw a line 5 pixels long.

Another important thing to know is, all the lengths of that row must add up to 16, because it’s a 16bit image.

What are your thoughts? (BTW, the images are both the same and are just random colors and lengths, no real rhyme or reason)

#include <UTFT.h>

// Display driver instance: CTE70 panel on pins 25-28 — presumably RS/WR/CS/RST; TODO confirm against UTFT wiring docs
UTFT    myGLCD(CTE70, 25, 26, 27, 28);

//=====================COLOR_PALLET==========================
// 16-bit RGB565 colour constants (5 bits red, 6 bits green, 5 bits blue).
// NOTE(review): YELLOW 0xD7E0 is slightly dimmed — pure RGB565 yellow would be 0xFFE0; confirm this is intentional.
#define BLACK   0x0
#define LIGHT_RED	0xFD14
#define RED     0xF800
#define DARK_RED    0x6000
#define LIGHT_ORANGE 0xFF52
#define ORANGE  0xFD00
#define DARK_ORANGE 0xFA80
#define LIGHT_YELLOW 0xFFF4
#define YELLOW  0xD7E0
#define DARK_YELLOW 0xCE40
#define LIGHT_GREEN 0xB7F6
#define GREEN   0x07E0
#define DARK_GREEN 0x0320
#define LIGHT_BLUE 0xA51F
#define BLUE    0x001F
#define DARK_BLUE 0x000C
#define PURPLE  0xF81F
#define CYAN    0x07FF
#define GREY    0x8410
#define WHITE   0xFFFF

// Technically a 16 bit image.

// Format 1: interleaved run-length encoding.
// Each row is {runCount, colour, length, colour, length, ...}; the lengths in
// a row must sum to 16 (the sprite width). 0xFFFFFFFF marks a transparent run
// (skipped while drawing, but it still advances the horizontal position).
long test[][15] = // needs to be type long for the transparent value. I could shave some memory if I tell it beforehand what the background color is, so that it can be skipped.
{ // how many colors in this row then color, length...
  {7, RED, 1, 0xFFFFFFFF, 4, GREEN, 2, YELLOW, 3, RED, 1, CYAN, 2, PURPLE, 3},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
  {6, WHITE, 3, GREEN, 3, 0xFFFFFFFF, 3, RED, 3, ORANGE, 2, PURPLE, 2},
  {4, GREEN, 5, YELLOW, 3, ORANGE, 3, RED, 5},
};

// Format 2: split run-length encoding.
// Row 0 is a header: {rowCount}. Rows 1..rowCount hold {runCount, colour, ...}.
// Rows rowCount+1..2*rowCount hold the matching run lengths: the lengths for
// colour row i live at row i + rowCount. 0xFFFFFFFF marks a transparent run.
long test2[][8] =
{
  {16}, // how many rows of colors. Also tells how many rows to skip to find the lengths for that row
  {7, RED, 0xFFFFFFFF, GREEN, YELLOW, RED, CYAN, PURPLE}, // number of colors in this line, color
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {6, WHITE, GREEN, 0xFFFFFFFF, RED, ORANGE, PURPLE},
  {4, GREEN, YELLOW, ORANGE, RED},
  {1, 4, 2, 3, 1, 2, 3}, // length of lines; each row must sum to 16 (sprite width)
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5},
  {3, 3, 3, 3, 2, 2},
  {5, 3, 3, 5}
};

// One-shot benchmark: initialises the display, draws the same sprite with
// both RLE formats, and prints each draw time (microseconds) over Serial.
// Hardware init calls are order-dependent — do not reorder.
void setup()
{
  Serial.begin(115200);
  myGLCD.InitLCD(LANDSCAPE);
  myGLCD.clrScr();
  myGLCD.fillScr(0x0); // black background
  
  long time = micros();            // timestamp before the format-1 draw
  TestIMG(20, 20, 5);              // format 1: interleaved colour/length pairs
  Serial.println(micros() - time);

  delay(10); // take a break

  time = micros();
  TestIMG2(20, 120, 5);            // format 2: split colour rows / length rows
  Serial.println(micros() - time);
}

// Nothing to do after setup(); all drawing happens once at boot.
void loop()
{

}

void TestIMG2(int x, int y, int scale)
{
  for (int i = 1, j = test2[0][0]; i <= j; i++)
    for (int r = 0 + (scale * i); r < scale * (i + 1); r++)
      for (int M = 0, c = 0, S = test2[i][0]; c < S; c++)
      {
        //Serial.println(S);
        byte length = test2[i + j][c] * scale;
        if (test2[i][c + 1] != 0xFFFFFFFF) // if not transparent
        {
          myGLCD.setColor(test2[i][c + 1]);
          myGLCD.drawHLine(M + x, r + y, length);
        }
        M += length;
      }
}

void TestIMG( int x,  int y,  int scale)
{
  for (int i = 0, rows = sizeof(test) / sizeof(test[0]); i < rows; i++)
    for (int r = 0 + (scale * i); r < scale * (i + 1); r++)
      for (int M = 0, c = 1, S = (test[i][0] * 2); c < S; c += 2)
      {
        byte length = test[i][c + 1] * scale;
        if (test[i][c] != 0xFFFFFFFF) // if not transparent
        {
          myGLCD.setColor(test[i][c]);
          myGLCD.drawHLine(M + x, r + y, length);
        }
        M += length;
      }
}

You could save space by reserving one of the 65536 colors for "transparent". Perhaps perhaps use 0 for transparent and use very, very dark green (0x0020) for black.

I will do that. Actually instead of 0x0020, I'll use 0x0000 for transparent and 0x0001 for black. We won't be able to tell the difference, but the code will.

Thanks John.

What you describe is called "Run-Length Encoding", and there are numerous standard image formats that use RLE. Inventing your own image format seems a little crazy to me, as you'll never be able to use standard tools for creating and editing the images. Do a little research on the dozens (actually, more like hundreds) of existing formats, and pick one that meets your needs. The probability of inventing something that hasn't already been done to death is about zero...

Regards, Ray L.

Inventing your own image format seems a little crazy to me, as you'll never be able to use standard tools for creating and editing the images.

I just needed something that was small enough to store on the arduino itself and without the need to always rely on an SD card or some special chip. Had I gone with something like a RAW or C file, then each 16x16 image would take up 512 byte and would need to go on an SD card.

I will look into the Run-Length Encoding format, thank you Ray.

Another way to encode a 16 by 16 image is to divide the area into quadrants. That is, every 2^n square is either a single block color or is four 2^(n-1) subsquares. This can work well if your output device has a fast method to blit rectangles.

You would encode the data as a variable-length stream of bits - stored in an array of words (because the AVR is a 16-bit chip, I think). That is, although the data is in an array, conceptually it's a bit stream.

To perform the encoding, I would be inclined to write a separate java program that reads a PNG file and outputs correctly-formed array initializers.

And don’t forget about using program memory (PROGMEM) rather than RAM for these constants.

So a format would be:

Image: 3 bit sprite size (the sprite is 2^n width, so we permit up to 256*256), followed by a block.

Block:
0 <8 bit colour> - solid block.
1 0 - transparent block
1 1 [UL block, UR block, LL block, LR block] - block divided into subblocks

Mod #1 - instead of storing the colour, use a colour table and do the solid block as an offset into that table
Mod #2 - use 0 0 for solid block. That way, you are always at an even position in your bit stream and that might make certain things easier.

Another way is to work out what your drawing primitives are (hline, vline, rectangle) and to hand-code the sprite as a list of parameters for these calls. Use a C union for the different parameter lists.

Actually - doesn't UTFT.h have a "draw bitmap" function? Use that - that's what it's for.

I'm aware of what the UTFT library has, but it's still too big.

An image composed of 16x16 pixels using 16-bit colors = 512 bytes. I would like something smaller, but it doesn't seem possible. Even mine are roughly 512 bytes.

I could do maybe a predefined pallet of colors and just assign each pixel a value 0 - FF. (0 = transparent, 1 = black ... FF = white) basically making the array contents a multiplier for the actual color I want.

IE. 0 - 255 then use that to multiply it by 256 to get the actual color.

Idk, I'm still working on it.

Could you use the same trick TV pictures use to reduce bandwidth by Chroma sub-sampling in say 4:2:2. That would reduce a 16 bit colour sprite to (2*16) + (8*16) bytes. Do you really need 16 bit (65536) colour or just 16 colour.

That would work. I was thinking about this last night too. Since the UTFT library already breaks up the 16bit color into 2 bytes, fcl and fch. What I can do is have a predefined pallet of 16 colors in an array. Then have a byte per pixel and use the high and low nibbles to combine the colors.

Ex.
// Not the best choices of colors because I can do a lot with just 0x0F, 0xF0, and 0xFF
byte pallet[16] = { 0x0, Black (0x01), Blue (0x1F), Green (0x7E), Dark Green (0xE0), Red (0xF8), Cyan, Yellow, Purple, Orange … White};

Pixel: 0x02 = ( int(0x0) << 8 | Blue ) = (0x001F) Blue.
0x50 = ( int(Red) << 8 | 0x0 ) = (0xF800) Red
0xF4 = ( int(White) << 8 | Dark Green) = (0xFFE0) Yellow

Something like this where each nibble makes the bigger/overall color. I just need to figure out the pallet I want to use or what is most common to use, that will give me the widest variety of colors.

It actually worked rather well. The color pallet could be a bit better but I’m happy with it.

#include <UTFT.h>

UTFT    myGLCD(CTE70, 25, 26, 27, 28);

// Format 3: one byte per pixel for a 16x16 sprite (256 bytes total).
// Each nibble is an index into ColorPallet[]: the high nibble selects the
// high byte (fch) of the final 16-bit colour, the low nibble the low byte (fcl).
byte test3[256] = {
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE0, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A,
  0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A, 0xF0, 0xDE, 0x0F, 0xFE, 0x00, 0xE1, 0xD5, 0x3A
};

// 16-entry palette of byte values; a pixel's two nibbles each pick one entry,
// which become the high (fch) and low (fcl) bytes of the final RGB565 colour.
byte ColorPallet[16] =
{ 
  0x0, 0x01, 0x10, 0x11, 0x05, 0x50, 0x55, 0x0A,
  0xA0, 0xAA, 0x0E, 0xE0, 0xEE, 0x0F, 0xF0, 0xFF
};

// Initialises the display, then draws the nibble-palette test sprite and a
// swatch grid of all 256 palette combinations. Hardware init calls are
// order-dependent — do not reorder.
void setup()
{
  Serial.begin(115200);
  myGLCD.InitLCD(LANDSCAPE);
  myGLCD.clrScr();
 
  myGLCD.fillScr(0xFFFF); // white background

  IMG(0, 0, 10);          // sprite from test3[], 10x magnification
  ShowPallet(180, 0, 10); // all 256 nibble-pair colours, for reference
}

// Nothing to do after setup(); all drawing happens once at boot.
void loop()
{

}

void ShowPallet(int x, int y, int Ssize)
{
  for (int i = 0; i <= 0xFF; i++)
  {
    myGLCD.fch = ColorPallet[i / 16];
    myGLCD.fcl = ColorPallet[i % 16];
    myGLCD.fillRect(x + Ssize * (i % 16), y + Ssize * (i / 16), x + Ssize * (i % 16) + Ssize, y + Ssize * (i / 16) + Ssize);
  }
}

void IMG(int x, int y, int Ssize)
{
  for (int i = 0; i <= 0xFF; i++)
  {
    myGLCD.fch = ColorPallet[test3[i] >> 4];
    myGLCD.fcl = ColorPallet[test3[i] & 0x0F];
    myGLCD.fillRect(x + Ssize * (i % 16), y + Ssize * (i / 16), x + Ssize * (i % 16) + Ssize, y + Ssize * (i / 16) + Ssize);
  }
}

What about speed — is it still fast enough for you?

You could maybe apply dithering to increase the apparent colours. Do your sprites in 24 bit colour and then reduce to 16 colours with Floyd-Steinberg dithering and store this as your final bitmap.