From 13737a64ff1c16578609820eaa74c7243f178f6e Mon Sep 17 00:00:00 2001 From: Andrey Sidorov Date: Fri, 20 Sep 2013 14:29:00 +1000 Subject: [PATCH 1/2] initial js-only gif encoding with omggif and NetQuant --- lib/neu_quant.js | 434 ++++++++++++++++++++++++++++ lib/omggif.js | 735 +++++++++++++++++++++++++++++++++++++++++++++++ lib/stream.js | 47 +++ package.json | 2 +- vnc-over-gif.js | 57 +--- 5 files changed, 1232 insertions(+), 43 deletions(-) create mode 100644 lib/neu_quant.js create mode 100644 lib/omggif.js create mode 100644 lib/stream.js diff --git a/lib/neu_quant.js b/lib/neu_quant.js new file mode 100644 index 0000000..200d6bb --- /dev/null +++ b/lib/neu_quant.js @@ -0,0 +1,434 @@ +/* NeuQuant Neural-Net Quantization Algorithm + * ------------------------------------------ + * + * Copyright (c) 1994 Anthony Dekker + * + * NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994. + * See "Kohonen neural networks for optimal colour quantization" + * in "Network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367. + * for a discussion of the algorithm. + * See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML + * + * Any party obtaining a copy of these files from the author, directly or + * indirectly, is granted, free of charge, a full and unrestricted irrevocable, + * world-wide, paid up, royalty-free, nonexclusive right and license to deal + * in this software and documentation files (the "Software"), including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons who receive + * copies from any such party to do so, with the only requirement being + * that this copyright notice remain intact. + * + * (JavaScript port 2012 by Johan Nordberg) + */ + +function toInt(v) { + return ~~v; +} + +var ncycles = 100; // number of learning cycles +var netsize = 256; // number of colors used +var maxnetpos = netsize - 1; + +// defs for freq and bias +var netbiasshift = 4; // bias for colour values +var intbiasshift = 16; // bias for fractions +var intbias = (1 << intbiasshift); +var gammashift = 10; +var gamma = (1 << gammashift); +var betashift = 10; +var beta = (intbias >> betashift); /* beta = 1/1024 */ +var betagamma = (intbias << (gammashift - betashift)); + +// defs for decreasing radius factor +var initrad = (netsize >> 3); // for 256 cols, radius starts +var radiusbiasshift = 6; // at 32.0 biased by 6 bits +var radiusbias = (1 << radiusbiasshift); +var initradius = (initrad * radiusbias); //and decreases by a +var radiusdec = 30; // factor of 1/30 each cycle + +// defs for decreasing alpha factor +var alphabiasshift = 10; // alpha starts at 1.0 +var initalpha = (1 << alphabiasshift); +var alphadec; // biased by 10 bits + +/* radbias and alpharadbias used for radpower calculation */ +var radbiasshift = 8; +var radbias = (1 << radbiasshift); +var alpharadbshift = (alphabiasshift + radbiasshift); +var alpharadbias = (1 << alpharadbshift); + +// four primes near 500 - assume no image has a length so large that it is +// divisible by all four primes +var prime1 = 499; +var prime2 = 491; +var prime3 = 487; +var prime4 = 503; +var minpicturebytes = (3 * prime4); + +/* + Constructor: NeuQuant + + Arguments: + + pixels - array of pixels in RGB format + samplefac - sampling factor 1 to 30 where lower is better quality + + > + > pixels = [r, g, b, r, g, b, r, g, b, ..] 
+ > +*/ +function NeuQuant(pixels, samplefac) { + var network; // int[netsize][4] + var netindex; // for network lookup - really 256 + + // bias and freq arrays for learning + var bias; + var freq; + var radpower; + + /* + Private Method: init + + sets up arrays + */ + function init() { + network = []; + netindex = []; + bias = []; + freq = []; + radpower = []; + + var i, v; + for (i = 0; i < netsize; i++) { + v = (i << (netbiasshift + 8)) / netsize; + network[i] = [v, v, v]; + freq[i] = intbias / netsize; + bias[i] = 0; + } + } + + /* + Private Method: unbiasnet + + unbiases network to give byte values 0..255 and record position i to prepare for sort + */ + function unbiasnet() { + for (var i = 0; i < netsize; i++) { + network[i][0] >>= netbiasshift; + network[i][1] >>= netbiasshift; + network[i][2] >>= netbiasshift; + network[i][3] = i; // record color number + } + } + + /* + Private Method: altersingle + + moves neuron *i* towards biased (b,g,r) by factor *alpha* + */ + function altersingle(alpha, i, b, g, r) { + network[i][0] -= (alpha * (network[i][0] - b)) / initalpha; + network[i][1] -= (alpha * (network[i][1] - g)) / initalpha; + network[i][2] -= (alpha * (network[i][2] - r)) / initalpha; + } + + /* + Private Method: alterneigh + + moves neurons in *radius* around index *i* towards biased (b,g,r) by factor *alpha* + */ + function alterneigh(radius, i, b, g, r) { + var lo = Math.abs(i - radius); + var hi = Math.min(i + radius, netsize); + + var j = i + 1; + var k = i - 1; + var m = 1; + + var p, a; + while ((j < hi) || (k > lo)) { + a = radpower[m++]; + + if (j < hi) { + p = network[j++]; + p[0] -= (a * (p[0] - b)) / alpharadbias; + p[1] -= (a * (p[1] - g)) / alpharadbias; + p[2] -= (a * (p[2] - r)) / alpharadbias; + } + + if (k > lo) { + p = network[k--]; + p[0] -= (a * (p[0] - b)) / alpharadbias; + p[1] -= (a * (p[1] - g)) / alpharadbias; + p[2] -= (a * (p[2] - r)) / alpharadbias; + } + } + } + + /* + Private Method: contest + + searches for biased BGR values + */ + function contest(b, g, r) { + /* + finds closest neuron (min dist) and updates freq + finds best neuron (min dist-bias) and returns position + for frequently chosen neurons, freq[i] is high and bias[i] is negative + bias[i] = gamma * ((1 / netsize) - freq[i]) + */ + + var bestd = ~(1 << 31); + var bestbiasd = bestd; + var bestpos = -1; + var bestbiaspos = bestpos; + + var i, n, dist, biasdist, betafreq; + for (i = 0; i < netsize; i++) { + n = network[i]; + + dist = Math.abs(n[0] - b) + Math.abs(n[1] - g) + Math.abs(n[2] - r); + if (dist < bestd) { + bestd = dist; + bestpos = i; + } + + biasdist = dist - ((bias[i]) >> (intbiasshift - netbiasshift)); + if (biasdist < bestbiasd) { + bestbiasd = biasdist; + bestbiaspos = i; + } + + betafreq = (freq[i] >> betashift); + freq[i] -= betafreq; + bias[i] += (betafreq << gammashift); + } + + freq[bestpos] += beta; + bias[bestpos] -= betagamma; + + return bestbiaspos; + } + + /* + Private Method: inxbuild + + sorts network and builds netindex[0..255] + */ + function inxbuild() { + var i, j, p, q, smallpos, smallval, previouscol = 0, startpos = 0; + for (i = 0; i < netsize; i++) { + p = network[i]; + smallpos = i; + smallval = p[1]; // index on g + // find smallest in i..netsize-1 + for (j = i + 1; j < netsize; j++) { + q = network[j]; + if (q[1] < smallval) { // index on g + smallpos = j; + smallval = q[1]; // index on g + } + } + q = network[smallpos]; + // swap p (i) and q (smallpos) entries + if (i != smallpos) { + j = q[0]; q[0] = p[0]; p[0] = j; + j = q[1]; q[1] = p[1]; p[1] 
= j; + j = q[2]; q[2] = p[2]; p[2] = j; + j = q[3]; q[3] = p[3]; p[3] = j; + } + // smallval entry is now in position i + + if (smallval != previouscol) { + netindex[previouscol] = (startpos + i) >> 1; + for (j = previouscol + 1; j < smallval; j++) + netindex[j] = i; + previouscol = smallval; + startpos = i; + } + } + netindex[previouscol] = (startpos + maxnetpos) >> 1; + for (j = previouscol + 1; j < 256; j++) + netindex[j] = maxnetpos; // really 256 + } + + /* + Private Method: inxsearch + + searches for BGR values 0..255 and returns a color index + */ + function inxsearch(b, g, r) { + var a, p, dist; + + var bestd = 1000; // biggest possible dist is 256*3 + var best = -1; + + var i = netindex[g]; // index on g + var j = i - 1; // start at netindex[g] and work outwards + + while ((i < netsize) || (j >= 0)) { + if (i < netsize) { + p = network[i]; + dist = p[1] - g; // inx key + if (dist >= bestd) i = netsize; // stop iter + else { + i++; + if (dist < 0) dist = -dist; + a = p[0] - b; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + a = p[2] - r; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + bestd = dist; + best = p[3]; + } + } + } + } + if (j >= 0) { + p = network[j]; + dist = g - p[1]; // inx key - reverse dif + if (dist >= bestd) j = -1; // stop iter + else { + j--; + if (dist < 0) dist = -dist; + a = p[0] - b; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + a = p[2] - r; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + bestd = dist; + best = p[3]; + } + } + } + } + } + + return best; + } + + /* + Private Method: learn + + "Main Learning Loop" + */ + function learn() { + var i; + + var lengthcount = pixels.length; + var alphadec = toInt(30 + ((samplefac - 1) / 3)); + var samplepixels = toInt(lengthcount / (3 * samplefac)); + var delta = toInt(samplepixels / ncycles); + var alpha = initalpha; + var radius = initradius; + + var rad = radius >> radiusbiasshift; + + if (rad <= 1) rad = 0; + for (i = 0; i < rad; i++) + radpower[i] = toInt(alpha * (((rad * rad - i * i) * radbias) / (rad * rad))); + + var step; + if (lengthcount < minpicturebytes) { + samplefac = 1; + step = 3; + } else if ((lengthcount % prime1) !== 0) { + step = 3 * prime1; + } else if ((lengthcount % prime2) !== 0) { + step = 3 * prime2; + } else if ((lengthcount % prime3) !== 0) { + step = 3 * prime3; + } else { + step = 3 * prime4; + } + + var b, g, r, j; + var pix = 0; // current pixel + + i = 0; + while (i < samplepixels) { + b = (pixels[pix] & 0xff) << netbiasshift; + g = (pixels[pix + 1] & 0xff) << netbiasshift; + r = (pixels[pix + 2] & 0xff) << netbiasshift; + + j = contest(b, g, r); + + altersingle(alpha, j, b, g, r); + if (rad !== 0) alterneigh(rad, j, b, g, r); // alter neighbours + + pix += step; + if (pix >= lengthcount) pix -= lengthcount; + + i++; + + if (delta === 0) delta = 1; + if (i % delta === 0) { + alpha -= alpha / alphadec; + radius -= radius / radiusdec; + rad = radius >> radiusbiasshift; + + if (rad <= 1) rad = 0; + for (j = 0; j < rad; j++) + radpower[j] = toInt(alpha * (((rad * rad - j * j) * radbias) / (rad * rad))); + } + } + } + + /* + Method: buildColormap + + 1. initializes network + 2. trains it + 3. removes misconceptions + 4. builds colorindex + */ + function buildColormap() { + init(); + learn(); + unbiasnet(); + inxbuild(); + } + this.buildColormap = buildColormap; + + /* + Method: getColormap + + builds colormap from the index + + returns array in the format: + + > + > [r, g, b, r, g, b, r, g, b, ..] 
+ > + */ + function getColormap() { + var map = []; + var index = []; + + for (var i = 0; i < netsize; i++) + index[network[i][3]] = i; + + var k = 0; + for (var l = 0; l < netsize; l++) { + var j = index[l]; + map[k++] = (network[j][0]); + map[k++] = (network[j][1]); + map[k++] = (network[j][2]); + } + return map; + } + this.getColormap = getColormap; + + /* + Method: lookupRGB + + looks for the closest *r*, *g*, *b* color in the map and + returns its index + */ + this.lookupRGB = inxsearch; +} + +module.exports = NeuQuant; diff --git a/lib/omggif.js b/lib/omggif.js new file mode 100644 index 0000000..5ced847 --- /dev/null +++ b/lib/omggif.js @@ -0,0 +1,735 @@ +// (c) Dean McNamee , 2013. +// +// https://github.com/deanm/omggif +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +// IN THE SOFTWARE. +// +// omggif is a JavaScript implementation of a GIF 89a encoder, including +// animation and compression. It does not rely on any specific underlying +// system, so should run in the browser, Node, or Plask. + +function GifWriter(buf, width, height, gopts) { + var p = 0; + + var gopts = gopts === undefined ? { } : gopts; + var loop_count = gopts.loop === undefined ? null : gopts.loop; + var global_palette = gopts.palette === undefined ? null : gopts.palette; + + if (width <= 0 || height <= 0 || width > 65535 || height > 65535) + throw "Width/Height invalid." + + function check_palette_and_num_colors(palette) { + var num_colors = palette.length; + if (num_colors < 2 || num_colors > 256 || num_colors & (num_colors-1)) + throw "Invalid code/color length, must be power of 2 and 2 .. 256."; + return num_colors; + } + + // - Header. + buf[p++] = 0x47; buf[p++] = 0x49; buf[p++] = 0x46; // GIF + buf[p++] = 0x38; buf[p++] = 0x39; buf[p++] = 0x61; // 89a + + // Handling of Global Color Table (palette) and background index. + var gp_num_colors_pow2 = 0; + var background = 0; + if (global_palette !== null) { + var gp_num_colors = check_palette_and_num_colors(global_palette); + while (gp_num_colors >>= 1) ++gp_num_colors_pow2; + gp_num_colors = 1 << gp_num_colors_pow2; + --gp_num_colors_pow2; + if (gopts.background !== undefined) { + background = gopts.background; + if (background >= gp_num_colors) throw "Background index out of range."; + // The GIF spec states that a background index of 0 should be ignored, so + // this is probably a mistake and you really want to set it to another + // slot in the palette. 
But actually in the end most browsers, etc end + // up ignoring this almost completely (including for dispose background). + if (background === 0) + throw "Background index explicitly passed as 0."; + } + } + + // - Logical Screen Descriptor. + // NOTE(deanm): w/h apparently ignored by implementations, but set anyway. + buf[p++] = width & 0xff; buf[p++] = width >> 8 & 0xff; + buf[p++] = height & 0xff; buf[p++] = height >> 8 & 0xff; + // NOTE: Indicates 0-bpp original color resolution (unused?). + buf[p++] = (global_palette !== null ? 0x80 : 0) | // Global Color Table Flag. + gp_num_colors_pow2; // NOTE: No sort flag (unused?). + buf[p++] = background; // Background Color Index. + buf[p++] = 0; // Pixel aspect ratio (unused?). + + // - Global Color Table + if (global_palette !== null) { + for (var i = 0, il = global_palette.length; i < il; ++i) { + var rgb = global_palette[i]; + buf[p++] = rgb >> 16 & 0xff; + buf[p++] = rgb >> 8 & 0xff; + buf[p++] = rgb & 0xff; + } + } + + if (loop_count !== null) { // Netscape block for looping. + if (loop_count < 0 || loop_count > 65535) + throw "Loop count invalid." + // Extension code, label, and length. + buf[p++] = 0x21; buf[p++] = 0xff; buf[p++] = 0x0b; + // NETSCAPE2.0 + buf[p++] = 0x4e; buf[p++] = 0x45; buf[p++] = 0x54; buf[p++] = 0x53; + buf[p++] = 0x43; buf[p++] = 0x41; buf[p++] = 0x50; buf[p++] = 0x45; + buf[p++] = 0x32; buf[p++] = 0x2e; buf[p++] = 0x30; + // Sub-block + buf[p++] = 0x03; buf[p++] = 0x01; + buf[p++] = loop_count & 0xff; buf[p++] = loop_count >> 8 & 0xff; + buf[p++] = 0x00; // Terminator. + } + + + var ended = false; + + this.addFrame = function(x, y, w, h, indexed_pixels, opts) { + if (ended === true) { --p; ended = false; } // Un-end. + + opts = opts === undefined ? { } : opts; + + // TODO(deanm): Bounds check x, y. Do they need to be within the virtual + // canvas width/height, I imagine? + if (x < 0 || y < 0 || x > 65535 || y > 65535) + throw "x/y invalid." + + if (w <= 0 || h <= 0 || w > 65535 || h > 65535) + throw "Width/Height invalid." + + if (indexed_pixels.length < w * h) + throw "Not enough pixels for the frame size."; + + var using_local_palette = true; + var palette = opts.palette; + if (palette === undefined || palette === null) { + using_local_palette = false; + palette = global_palette; + } + + if (palette === undefined || palette === null) + throw "Must supply either a local or global palette."; + + var num_colors = check_palette_and_num_colors(palette); + + // Compute the min_code_size (power of 2), destroying num_colors. + var min_code_size = 0; + while (num_colors >>= 1) ++min_code_size; + num_colors = 1 << min_code_size; // Now we can easily get it back. + + var delay = opts.delay === undefined ? 0 : opts.delay; + + // From the spec: + // 0 - No disposal specified. The decoder is + // not required to take any action. + // 1 - Do not dispose. The graphic is to be left + // in place. + // 2 - Restore to background color. The area used by the + // graphic must be restored to the background color. + // 3 - Restore to previous. The decoder is required to + // restore the area overwritten by the graphic with + // what was there prior to rendering the graphic. + // 4-7 - To be defined. + // NOTE(deanm): Dispose background doesn't really work, apparently most + // browsers ignore the background palette index and clear to transparency. + var disposal = opts.disposal === undefined ? 0 : opts.disposal; + if (disposal < 0 || disposal > 3) // 4-7 is reserved. 
+ throw "Disposal out of range."; + + var use_transparency = false; + var transparent_index = 0; + if (opts.transparent !== undefined && opts.transparent !== null) { + use_transparency = true; + transparent_index = opts.transparent; + if (transparent_index < 0 || transparent_index >= num_colors) + throw "Transparent color index."; + } + + if (disposal !== 0 || use_transparency || delay !== 0) { + // - Graphics Control Extension + buf[p++] = 0x21; buf[p++] = 0xf9; // Extension / Label. + buf[p++] = 4; // Byte size. + + buf[p++] = disposal << 2 | (use_transparency === true ? 1 : 0); + buf[p++] = delay & 0xff; buf[p++] = delay >> 8 & 0xff; + buf[p++] = transparent_index; // Transparent color index. + buf[p++] = 0; // Block Terminator. + } + + // - Image Descriptor + buf[p++] = 0x2c; // Image Seperator. + buf[p++] = x & 0xff; buf[p++] = x >> 8 & 0xff; // Left. + buf[p++] = y & 0xff; buf[p++] = y >> 8 & 0xff; // Top. + buf[p++] = w & 0xff; buf[p++] = w >> 8 & 0xff; + buf[p++] = h & 0xff; buf[p++] = h >> 8 & 0xff; + // NOTE: No sort flag (unused?). + // TODO(deanm): Support interlace. + buf[p++] = using_local_palette === true ? (0x80 | (min_code_size-1)) : 0; + + // - Local Color Table + if (using_local_palette === true) { + for (var i = 0, il = palette.length; i < il; ++i) { + var rgb = palette[i]; + buf[p++] = rgb >> 16 & 0xff; + buf[p++] = rgb >> 8 & 0xff; + buf[p++] = rgb & 0xff; + } + } + + p = GifWriterOutputLZWCodeStream( + buf, p, min_code_size < 2 ? 2 : min_code_size, indexed_pixels); + }; + + this.read = function() { + var data = buf.slice(0, p); + p = 0; + return data; + }; + + this.end = function() { + if (ended === false) { + buf[p++] = 0x3b; // Trailer. + ended = true; + } + return p; + }; +} + +// Main compression routine, palette indexes -> LZW code stream. +// |index_stream| must have at least one entry. +function GifWriterOutputLZWCodeStream(buf, p, min_code_size, index_stream) { + buf[p++] = min_code_size; + var cur_subblock = p++; // Pointing at the length field. + + var clear_code = 1 << min_code_size; + var code_mask = clear_code - 1; + var eoi_code = clear_code + 1; + var next_code = eoi_code + 1; + + var cur_code_size = min_code_size + 1; // Number of bits per code. + var cur_shift = 0; + // We have at most 12-bit codes, so we should have to hold a max of 19 + // bits here (and then we would write out). + var cur = 0; + + function emit_bytes_to_buffer(bit_block_size) { + while (cur_shift >= bit_block_size) { + buf[p++] = cur & 0xff; + cur >>= 8; cur_shift -= 8; + if (p === cur_subblock + 256) { // Finished a subblock. + buf[cur_subblock] = 255; + cur_subblock = p++; + } + } + } + + function emit_code(c) { + cur |= c << cur_shift; + cur_shift += cur_code_size; + emit_bytes_to_buffer(8); + } + + // I am not an expert on the topic, and I don't want to write a thesis. + // However, it is good to outline here the basic algorithm and the few data + // structures and optimizations here that make this implementation fast. + // The basic idea behind LZW is to build a table of previously seen runs + // addressed by a short id (herein called output code). All data is + // referenced by a code, which represents one or more values from the + // original input stream. All input bytes can be referenced as the same + // value as an output code. So if you didn't want any compression, you + // could more or less just output the original bytes as codes (there are + // some details to this, but it is the idea). 
In order to achieve + // compression, values greater then the input range (codes can be up to + // 12-bit while input only 8-bit) represent a sequence of previously seen + // inputs. The decompressor is able to build the same mapping while + // decoding, so there is always a shared common knowledge between the + // encoding and decoder, which is also important for "timing" aspects like + // how to handle variable bit width code encoding. + // + // One obvious but very important consequence of the table system is there + // is always a unique id (at most 12-bits) to map the runs. 'A' might be + // 4, then 'AA' might be 10, 'AAA' 11, 'AAAA' 12, etc. This relationship + // can be used for an effecient lookup strategy for the code mapping. We + // need to know if a run has been seen before, and be able to map that run + // to the output code. Since we start with known unique ids (input bytes), + // and then from those build more unique ids (table entries), we can + // continue this chain (almost like a linked list) to always have small + // integer values that represent the current byte chains in the encoder. + // This means instead of tracking the input bytes (AAAABCD) to know our + // current state, we can track the table entry for AAAABC (it is guaranteed + // to exist by the nature of the algorithm) and the next character D. + // Therefor the tuple of (table_entry, byte) is guaranteed to also be + // unique. This allows us to create a simple lookup key for mapping input + // sequences to codes (table indices) without having to store or search + // any of the code sequences. So if 'AAAA' has a table entry of 12, the + // tuple of ('AAAA', K) for any input byte K will be unique, and can be our + // key. This leads to a integer value at most 20-bits, which can always + // fit in an SMI value and be used as a fast sparse array / object key. + + // Output code for the current contents of the index buffer. + var ib_code = index_stream[0] & code_mask; // Load first input index. + var code_table = { }; // Key'd on our 20-bit "tuple". + + emit_code(clear_code); // Spec says first code should be a clear code. + + // First index already loaded, process the rest of the stream. + for (var i = 1, il = index_stream.length; i < il; ++i) { + var k = index_stream[i] & code_mask; + var cur_key = ib_code << 8 | k; // (prev, k) unique tuple. + var cur_code = code_table[cur_key]; // buffer + k. + + // Check if we have to create a new code table entry. + if (cur_code === undefined) { // We don't have buffer + k. + // Emit index buffer (without k). + // This is an inline version of emit_code, because this is the core + // writing routine of the compressor (and V8 cannot inline emit_code + // because it is a closure here in a different context). Additionally + // we can call emit_byte_to_buffer less often, because we can have + // 30-bits (from our 31-bit signed SMI), and we know our codes will only + // be 12-bits, so can safely have 18-bits there without overflow. + // emit_code(ib_code); + cur |= ib_code << cur_shift; + cur_shift += cur_code_size; + while (cur_shift >= 8) { + buf[p++] = cur & 0xff; + cur >>= 8; cur_shift -= 8; + if (p === cur_subblock + 256) { // Finished a subblock. + buf[cur_subblock] = 255; + cur_subblock = p++; + } + } + + if (next_code === 4096) { // Table full, need a clear. + emit_code(clear_code); + next_code = eoi_code + 1; + cur_code_size = min_code_size + 1; + code_table = { }; + } else { // Table not full, insert a new entry. + // Increase our variable bit code sizes if necessary. 
This is a bit + // tricky as it is based on "timing" between the encoding and + // decoder. From the encoders perspective this should happen after + // we've already emitted the index buffer and are about to create the + // first table entry that would overflow our current code bit size. + if (next_code >= (1 << cur_code_size)) ++cur_code_size; + code_table[cur_key] = next_code++; // Insert into code table. + } + + ib_code = k; // Index buffer to single input k. + } else { + ib_code = cur_code; // Index buffer to sequence in code table. + } + } + + emit_code(ib_code); // There will still be something in the index buffer. + emit_code(eoi_code); // End Of Information. + + // Flush / finalize the sub-blocks stream to the buffer. + emit_bytes_to_buffer(1); + + // Finish the sub-blocks, writing out any unfinished lengths and + // terminating with a sub-block of length 0. If we have already started + // but not yet used a sub-block it can just become the terminator. + if (cur_subblock + 1 === p) { // Started but unused. + buf[cur_subblock] = 0; + } else { // Started and used, write length and additional terminator block. + buf[cur_subblock] = p - cur_subblock - 1; + buf[p++] = 0; + } + return p; +} + +function GifReader(buf) { + var p = 0; + + // - Header. + if (buf[p++] !== 0x47 || buf[p++] !== 0x49 || buf[p++] !== 0x46 || // GIF + buf[p++] !== 0x38 || buf[p++] !== 0x39 || buf[p++] !== 0x61) { // 89a + throw "Invalid GIF 89a header."; + } + + // - Logical Screen Descriptor. + var width = buf[p++] | buf[p++] << 8; + var height = buf[p++] | buf[p++] << 8; + var pf0 = buf[p++]; // . + var global_palette_flag = pf0 >> 7; + var num_global_colors_pow2 = pf0 & 0x7; + var num_global_colors = 1 << (num_global_colors_pow2 + 1); + var background = buf[p++]; + buf[p++]; // Pixel aspect ratio (unused?). + + var global_palette_offset = null; + + if (global_palette_flag) { + global_palette_offset = p; + p += num_global_colors * 3; // Seek past palette. + } + + var loop_count = null; + + var no_eof = true; + + var frames = [ ]; + + var delay = 0; + var transparent_index = null; + var disposal = 0; // 0 - No disposal specified. + var loop_count = null; + + this.width = width; + this.height = height; + + while (no_eof && p < buf.length) { + switch (buf[p++]) { + case 0x21: // Graphics Control Extension Block + switch (buf[p++]) { + case 0xff: // Application specific block + // Try if it's a Netscape block (with animation loop counter). + if (buf[p ] !== 0x0b || // 21 FF already read, check block size. + // NETSCAPE2.0 + buf[p+1 ] == 0x4e && buf[p+2 ] == 0x45 && buf[p+3 ] == 0x54 && + buf[p+4 ] == 0x53 && buf[p+5 ] == 0x43 && buf[p+6 ] == 0x41 && + buf[p+7 ] == 0x50 && buf[p+8 ] == 0x45 && buf[p+9 ] == 0x32 && + buf[p+10] == 0x2e && buf[p+11] == 0x30 && + // Sub-block + buf[p+12] == 0x03 && buf[p+13] == 0x01 && buf[p+16] == 0) { + p += 14; + loop_count = buf[p++] | buf[p++] << 8; + p++; // Skip terminator. + } else { // We don't know what it is, just try to get past it. + p += 12; + while (true) { // Seek through subblocks. + var block_size = buf[p++]; + if (block_size === 0) break; + p += block_size; + } + } + break; + + case 0xf9: // Graphics Control Extension + if (buf[p++] !== 0x4 || buf[p+4] !== 0) + throw "Invalid graphics extension block."; + var pf1 = buf[p++]; + delay = buf[p++] | buf[p++] << 8; + transparent_index = buf[p++]; + if ((pf1 & 1) === 0) transparent_index = null; + disposal = pf1 >> 2 & 0x7; + p++; // Skip terminator. + break; + + case 0xfe: // Comment Extension. 
+ while (true) { // Seek through subblocks. + var block_size = buf[p++]; + if (block_size === 0) break; + // console.log(buf.slice(p, p+block_size).toString('ascii')); + p += block_size; + } + break; + + default: + throw "Unknown graphic control label: 0x" + buf[p-1].toString(16); + } + break; + + case 0x2c: // Image Descriptor. + var x = buf[p++] | buf[p++] << 8; + var y = buf[p++] | buf[p++] << 8; + var w = buf[p++] | buf[p++] << 8; + var h = buf[p++] | buf[p++] << 8; + var pf2 = buf[p++]; + var local_palette_flag = pf2 >> 7; + var num_local_colors_pow2 = pf2 & 0x7; + var num_local_colors = 1 << (num_local_colors_pow2 + 1); + var palette_offset = global_palette_offset; + var has_local_palette = false; + if (local_palette_flag) { + var has_local_palette = true; + palette_offset = p; // Override with local palette. + p += num_local_colors * 3; // Seek past palette. + } + + var data_offset = p; + + p++; // codesize + while (true) { + var block_size = buf[p++]; + if (block_size === 0) break; + p += block_size; + } + + frames.push({x: x, y: y, width: w, height: h, + has_local_palette: has_local_palette, + palette_offset: palette_offset, + data_offset: data_offset, + data_length: p - data_offset, + transparent_index: transparent_index, + delay: delay, + disposal: disposal}); + break; + + case 0x3b: // Trailer Marker (end of file). + no_eof = false; + break; + + default: + throw "Unknown gif block: 0x" + buf[p-1].toString(16); + break; + } + } + + this.numFrames = function() { + return frames.length; + }; + + this.frameInfo = function(frame_num) { + if (frame_num < 0 || frame_num >= frames.length) + throw "Frame index out of range."; + return frames[frame_num]; + } + + this.decodeAndBlitFrameBGRA = function(frame_num, pixels) { + var frame = this.frameInfo(frame_num); + var num_pixels = frame.width * frame.height; + var index_stream = new Uint8Array(num_pixels); // Atmost 8-bit indices. + GifReaderLZWOutputIndexStream( + buf, frame.data_offset, index_stream, num_pixels); + var palette_offset = frame.palette_offset; + + // NOTE(deanm): It seems to be much faster to compare index to 256 than + // to === null. Not sure why, but CompareStub_EQ_STRICT shows up high in + // the profile, not sure if it's related to using a Uint8Array. + var trans = frame.transparent_index; + if (trans === null) trans = 256; + + var wstride = (width - frame.width) * 4; + var op = ((frame.y * width) + frame.x) * 4; // output pointer. + var linex = frame.width; + + for (var i = 0, il = index_stream.length; i < il; ++i) { + var index = index_stream[i]; + + if (index === trans) { + op += 4; + } else { + var r = buf[palette_offset + index * 3]; + var g = buf[palette_offset + index * 3 + 1]; + var b = buf[palette_offset + index * 3 + 2]; + pixels[op++] = b; + pixels[op++] = g; + pixels[op++] = r; + pixels[op++] = 255; + } + + if (--linex === 0) { + op += wstride; + linex = frame.width; + } + } + }; + + // I will go to copy and paste hell one day... + this.decodeAndBlitFrameRGBA = function(frame_num, pixels) { + var frame = this.frameInfo(frame_num); + var num_pixels = frame.width * frame.height; + var index_stream = new Uint8Array(num_pixels); // Atmost 8-bit indices. + GifReaderLZWOutputIndexStream( + buf, frame.data_offset, index_stream, num_pixels); + var op = 0; // output pointer. + var palette_offset = frame.palette_offset; + + // NOTE(deanm): It seems to be much faster to compare index to 256 than + // to === null. 
Not sure why, but CompareStub_EQ_STRICT shows up high in + // the profile, not sure if it's related to using a Uint8Array. + var trans = frame.transparent_index; + if (trans === null) trans = 256; + + var wstride = (width - frame.width) * 4; + var op = ((frame.y * width) + frame.x) * 4; // output pointer. + var linex = frame.width; + + for (var i = 0, il = index_stream.length; i < il; ++i) { + var index = index_stream[i]; + + if (index === trans) { + op += 4; + } else { + var r = buf[palette_offset + index * 3]; + var g = buf[palette_offset + index * 3 + 1]; + var b = buf[palette_offset + index * 3 + 2]; + pixels[op++] = r; + pixels[op++] = g; + pixels[op++] = b; + pixels[op++] = 255; + } + + if (--linex === 0) { + op += wstride; + linex = frame.width; + } + } + }; +} + +function GifReaderLZWOutputIndexStream(code_stream, p, output, output_length) { + var min_code_size = code_stream[p++]; + + var clear_code = 1 << min_code_size; + var eoi_code = clear_code + 1; + var next_code = eoi_code + 1; + + var cur_code_size = min_code_size + 1; // Number of bits per code. + // NOTE: This shares the same name as the encoder, but has a different + // meaning here. Here this masks each code coming from the code stream. + var code_mask = (1 << cur_code_size) - 1; + var cur_shift = 0; + var cur = 0; + + var op = 0; // Output pointer. + + var subblock_size = code_stream[p++]; + + // TODO(deanm): Would using a TypedArray be any faster? At least it would + // solve the fast mode / backing store uncertainty. + // var code_table = Array(4096); + var code_table = new Int32Array(4096); // Can be signed, we only use 20 bits. + + var prev_code = null; // Track code-1. + + while (true) { + // Read up to two bytes, making sure we always 12-bits for max sized code. + while (cur_shift < 16) { + if (subblock_size === 0) break; // No more data to be read. + + cur |= code_stream[p++] << cur_shift; + cur_shift += 8; + + if (subblock_size === 1) { // Never let it get to 0 to hold logic above. + subblock_size = code_stream[p++]; // Next subblock. + } else { + --subblock_size; + } + } + + // TODO(deanm): We should never really get here, we should have received + // and EOI. + if (cur_shift < cur_code_size) + break; + + var code = cur & code_mask; + cur >>= cur_code_size; + cur_shift -= cur_code_size; + + // TODO(deanm): Maybe should check that the first code was a clear code, + // at least this is what you're supposed to do. But actually our encoder + // now doesn't emit a clear code first anyway. + if (code === clear_code) { + // We don't actually have to clear the table. This could be a good idea + // for greater error checking, but we don't really do any anyway. We + // will just track it with next_code and overwrite old entries. + + next_code = eoi_code + 1; + cur_code_size = min_code_size + 1; + code_mask = (1 << cur_code_size) - 1; + + // Don't update prev_code ? + prev_code = null; + continue; + } else if (code === eoi_code) { + break; + } + + // We have a similar situation as the decoder, where we want to store + // variable length entries (code table entries), but we want to do in a + // faster manner than an array of arrays. The code below stores sort of a + // linked list within the code table, and then "chases" through it to + // construct the dictionary entries. When a new entry is created, just the + // last byte is stored, and the rest (prefix) of the entry is only + // referenced by its table entry. Then the code chases through the + // prefixes until it reaches a single byte code. 
We have to chase twice, + // first to compute the length, and then to actually copy the data to the + // output (backwards, since we know the length). The alternative would be + // storing something in an intermediate stack, but that doesn't make any + // more sense. I implemented an approach where it also stored the length + // in the code table, although it's a bit tricky because you run out of + // bits (12 + 12 + 8), but I didn't measure much improvements (the table + // entries are generally not the long). Even when I created benchmarks for + // very long table entries the complexity did not seem worth it. + // The code table stores the prefix entry in 12 bits and then the suffix + // byte in 8 bits, so each entry is 20 bits. + + var chase_code = code < next_code ? code : prev_code; + + // Chase what we will output, either {CODE} or {CODE-1}. + var chase_length = 0; + var chase = chase_code; + while (chase > clear_code) { + chase = code_table[chase] >> 8; + ++chase_length; + } + + var k = chase; + + var op_end = op + chase_length + (chase_code !== code ? 1 : 0); + if (op_end > output_length) { + console.log("Warning, gif stream longer than expected."); + return; + } + + // Already have the first byte from the chase, might as well write it fast. + output[op++] = k; + + op += chase_length; + var b = op; // Track pointer, writing backwards. + + if (chase_code !== code) // The case of emitting {CODE-1} + k. + output[op++] = k; + + chase = chase_code; + while (chase_length--) { + chase = code_table[chase]; + output[--b] = chase & 0xff; // Write backwards. + chase >>= 8; // Pull down to the prefix code. + } + + if (prev_code !== null && next_code < 4096) { + code_table[next_code++] = prev_code << 8 | k; + // TODO(deanm): Figure out this clearing vs code growth logic better. I + // have an feeling that it should just happen somewhere else, for now it + // is awkward between when we grow past the max and then hit a clear code. + // For now just check if we hit the max 12-bits (then a clear code should + // follow, also of course encoded in 12-bits). + if (next_code >= code_mask+1 && cur_code_size < 12) { + ++cur_code_size; + code_mask = code_mask << 1 | 1; + } + } + + prev_code = code; + } + + if (op !== output_length) { + console.log("Warning, gif stream shorter than expected."); + } + + return output; +} + +try { exports.GifWriter = GifWriter; exports.GifReader = GifReader } catch(e) { } // CommonJS. 
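
For reference, here is a minimal sketch of how the GifWriter API above is driven. It mirrors what lib/stream.js below does, but the buffer size, palette and frame data are illustrative assumptions only, not part of omggif itself:

    var omggif = require('./omggif.js');

    // 2x2 canvas, looping forever (Netscape loop count 0); the output buffer
    // only has to be big enough for the header plus the frames written before
    // the next read().
    var buf = new Buffer(1024);
    var gw = new omggif.GifWriter(buf, 2, 2, { loop: 0 });

    // One frame covering the whole canvas: indexed_pixels holds one palette
    // index per pixel, and palette entries are 0xRRGGBB integers (2..256
    // colors, power of 2).
    gw.addFrame(0, 0, 2, 2, [0, 1, 1, 0],
                { palette: [0x000000, 0xffffff], delay: 10 }); // delay in 1/100 s

    // read() returns everything written since the previous read() and rewinds
    // the internal pointer, so the first call yields header + first frame and
    // later calls yield only the newly added frames; end() writes the trailer.
    var bytes = gw.read();
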
diff --git a/lib/stream.js b/lib/stream.js new file mode 100644 index 0000000..8d91ad7 --- /dev/null +++ b/lib/stream.js @@ -0,0 +1,47 @@ +var omggif = require('./omggif.js'); +var NeuQuant = require('./neu_quant.js'); + +// TODO: make it actually a stream + +function GifStream(w, h) { + // HACK: I re-use omggif buffer and hope that 1024 + w*h is enough + // i expect (header + compressed pixels) to be less than uncompressed bitmap + this.buf = new Buffer(1024 + w*h); + this.encoder = new omggif.GifWriter(this.buf, w, h); //, { palette: webpalette, transparent: 255 }); +} + +GifStream.prototype.addFrame = function(x, y, w, h, data) { + var quality = 1; + var rgb_idx = 0; + var rgb = new Buffer(w*h*3); + for(var i=0; i < data.length; i+= 4) { + rgb[rgb_idx++] = data[i + 0]; + rgb[rgb_idx++] = data[i + 1]; + rgb[rgb_idx++] = data[i + 2]; + } + var imgq = new NeuQuant(rgb, quality); + imgq.buildColormap(); + var map = imgq.getColormap(); + var indexed = new Buffer(w*h); + rgb_idx = 0; + var r, g, b; + for (var i=0; i < indexed.length; ++i) { + r = rgb[rgb_idx++]; + g = rgb[rgb_idx++]; + b = rgb[rgb_idx++]; + indexed[i] = imgq.lookupRGB(r, g, b); + } + var palette = new Array(map.length/3); + var map_idx = 0; + for (var i=0; i < palette.length; ++i) { + palette[i] = + (map[map_idx + 0] << 0) + + (map[map_idx + 1] << 8) + + (map[map_idx + 2] << 16); + map_idx += 3; + } + this.encoder.addFrame(x, y, w, h, indexed, {delay: 1, palette: palette}); + return this.encoder.read(); +}; + +module.exports = GifStream; diff --git a/package.json b/package.json index 8d913f7..fb8b292 100644 --- a/package.json +++ b/package.json @@ -25,6 +25,6 @@ "readmeFilename": "README.md", "dependencies": { "rfb2": "0.0.7", - "gif": "2.0.2" + "readable-stream": "~1.0.2" } } diff --git a/vnc-over-gif.js b/vnc-over-gif.js index 2fa7c7a..cb707e4 100755 --- a/vnc-over-gif.js +++ b/vnc-over-gif.js @@ -1,54 +1,27 @@ #!/usr/bin/env node - -var GifLib = require('gif'); var rfb = require('rfb2'); -var url = require('url'); +var GifStream = require('./lib/stream.js'); -var http = require('http'); -http.createServer(function(req, res) { - var params = url.parse(req.url, true); +require('http').createServer(function(req, res) { + var params = require('url').parse(req.url, true); if (params.pathname == '/screen.gif') { - var connectArgs = { - host: params.query.host || 'localhost', - port: params.query.port ? parseInt(params.query.port,10) : 5900, - password: params.query.password || '' - }; - var r = rfb.createConnection(connectArgs); + var r = rfb.createConnection(params.query); var gif; + r.on('connect', function() { res.writeHead(200, { 'Content-Type': 'image/gif'}); - gif = new GifLib.AnimatedGif(r.width, r.height); - gif.setOutputCallback(res.write.bind(res)); - }); - r.on('rect', function(rect) { - // todo: use BGRA buffer type in gif constructor (does not work for me for some reason) - var rgb = new Buffer(rect.width*rect.height*3); - var offset = 0; - for (var i=0; i < rect.buffer.length; i += 4) { - rgb[offset++] = rect.buffer[i+2]; - rgb[offset++] = rect.buffer[i+1]; - rgb[offset++] = rect.buffer[i]; - } - gif.push(rgb, rect.x, rect.y, rect.width, rect.height); - gif.endPush(); - - // send on vnc disconnect? 
- // gif.end(); - }); - r.on('error', function(err) { - console.error(err); - res.writeHead(502, { 'Content-Type': 'text/plain'}); - if (typeof(err) == 'string') { - res.end(err); - } else if (err instanceof Error) { - res.end(err.message); - } else { - res.end('unknown error:' + JSON.stringify(err)); - } + + gif = new GifStream(r.width, r.height); + //gif.pipe(res); + + r.on('rect', function(rect) { + var data = gif.addFrame(rect.x, rect.y, rect.width, rect.height, rect.data); + r.requestUpdate(true, 0, 0, r.width, r.height); + res.write(data); + }); }); - } else { res.writeHead(404); res.end(); } -}).listen(process.env.PORT || 4444); +}).listen(process.env.PORT || 4444, '0.0.0.0'); From 35e6e415363edbabb5714b70591bfeb85d39b113 Mon Sep 17 00:00:00 2001 From: Andrey Sidorov Date: Fri, 20 Sep 2013 23:46:28 +1000 Subject: [PATCH 2/2] buffer updates in a screen copy, senb batched rectangle with a minimum interval --- lib/stream.js | 13 +- lib/typed_neu_quant.js | 431 +++++++++++++++++++++++++++++++++++++++++ vnc-over-gif.js | 92 ++++++++- 3 files changed, 523 insertions(+), 13 deletions(-) create mode 100644 lib/typed_neu_quant.js diff --git a/lib/stream.js b/lib/stream.js index 8d91ad7..007e74b 100644 --- a/lib/stream.js +++ b/lib/stream.js @@ -1,5 +1,6 @@ var omggif = require('./omggif.js'); -var NeuQuant = require('./neu_quant.js'); +var NeuQuant = require('./typed_neu_quant.js'); +//var NeuQuant = require('./neu_quant.js'); // TODO: make it actually a stream @@ -10,15 +11,9 @@ function GifStream(w, h) { this.encoder = new omggif.GifWriter(this.buf, w, h); //, { palette: webpalette, transparent: 255 }); } -GifStream.prototype.addFrame = function(x, y, w, h, data) { - var quality = 1; +GifStream.prototype.addFrame = function(x, y, w, h, rgb) { + var quality = 29; var rgb_idx = 0; - var rgb = new Buffer(w*h*3); - for(var i=0; i < data.length; i+= 4) { - rgb[rgb_idx++] = data[i + 0]; - rgb[rgb_idx++] = data[i + 1]; - rgb[rgb_idx++] = data[i + 2]; - } var imgq = new NeuQuant(rgb, quality); imgq.buildColormap(); var map = imgq.getColormap(); diff --git a/lib/typed_neu_quant.js b/lib/typed_neu_quant.js new file mode 100644 index 0000000..a41a1d7 --- /dev/null +++ b/lib/typed_neu_quant.js @@ -0,0 +1,431 @@ +/* NeuQuant Neural-Net Quantization Algorithm + * ------------------------------------------ + * + * Copyright (c) 1994 Anthony Dekker + * + * NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994. + * See "Kohonen neural networks for optimal colour quantization" + * in "Network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367. + * for a discussion of the algorithm. + * See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML + * + * Any party obtaining a copy of these files from the author, directly or + * indirectly, is granted, free of charge, a full and unrestricted irrevocable, + * world-wide, paid up, royalty-free, nonexclusive right and license to deal + * in this software and documentation files (the "Software"), including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons who receive + * copies from any such party to do so, with the only requirement being + * that this copyright notice remain intact. 
+ * + * (JavaScript port 2012 by Johan Nordberg) + */ + +var ncycles = 100; // number of learning cycles +var netsize = 256; // number of colors used +var maxnetpos = netsize - 1; + +// defs for freq and bias +var netbiasshift = 4; // bias for colour values +var intbiasshift = 16; // bias for fractions +var intbias = (1 << intbiasshift); +var gammashift = 10; +var gamma = (1 << gammashift); +var betashift = 10; +var beta = (intbias >> betashift); /* beta = 1/1024 */ +var betagamma = (intbias << (gammashift - betashift)); + +// defs for decreasing radius factor +var initrad = (netsize >> 3); // for 256 cols, radius starts +var radiusbiasshift = 6; // at 32.0 biased by 6 bits +var radiusbias = (1 << radiusbiasshift); +var initradius = (initrad * radiusbias); //and decreases by a +var radiusdec = 30; // factor of 1/30 each cycle + +// defs for decreasing alpha factor +var alphabiasshift = 10; // alpha starts at 1.0 +var initalpha = (1 << alphabiasshift); +var alphadec; // biased by 10 bits + +/* radbias and alpharadbias used for radpower calculation */ +var radbiasshift = 8; +var radbias = (1 << radbiasshift); +var alpharadbshift = (alphabiasshift + radbiasshift); +var alpharadbias = (1 << alpharadbshift); + +// four primes near 500 - assume no image has a length so large that it is +// divisible by all four primes +var prime1 = 499; +var prime2 = 491; +var prime3 = 487; +var prime4 = 503; +var minpicturebytes = (3 * prime4); + +/* + Constructor: NeuQuant + + Arguments: + + pixels - array of pixels in RGB format + samplefac - sampling factor 1 to 30 where lower is better quality + + > + > pixels = [r, g, b, r, g, b, r, g, b, ..] + > +*/ +function NeuQuant(pixels, samplefac) { + var network; // int[netsize][4] + var netindex; // for network lookup - really 256 + + // bias and freq arrays for learning + var bias; + var freq; + var radpower; + + /* + Private Method: init + + sets up arrays + */ + function init() { + network = []; + netindex = new Int32Array(256); + bias = new Int32Array(netsize); + freq = new Int32Array(netsize); + radpower = new Int32Array(netsize >> 3); + + var i, v; + for (i = 0; i < netsize; i++) { + v = (i << (netbiasshift + 8)) / netsize; + network[i] = new Float64Array([v, v, v, 0]); + //network[i] = [v, v, v, 0] + freq[i] = intbias / netsize; + bias[i] = 0; + } + } + + /* + Private Method: unbiasnet + + unbiases network to give byte values 0..255 and record position i to prepare for sort + */ + function unbiasnet() { + for (var i = 0; i < netsize; i++) { + network[i][0] >>= netbiasshift; + network[i][1] >>= netbiasshift; + network[i][2] >>= netbiasshift; + network[i][3] = i; // record color number + } + } + + /* + Private Method: altersingle + + moves neuron *i* towards biased (b,g,r) by factor *alpha* + */ + function altersingle(alpha, i, b, g, r) { + network[i][0] -= (alpha * (network[i][0] - b)) / initalpha; + network[i][1] -= (alpha * (network[i][1] - g)) / initalpha; + network[i][2] -= (alpha * (network[i][2] - r)) / initalpha; + } + + /* + Private Method: alterneigh + + moves neurons in *radius* around index *i* towards biased (b,g,r) by factor *alpha* + */ + function alterneigh(radius, i, b, g, r) { + var lo = Math.abs(i - radius); + var hi = Math.min(i + radius, netsize); + + var j = i + 1; + var k = i - 1; + var m = 1; + + var p, a; + while ((j < hi) || (k > lo)) { + a = radpower[m++]; + + if (j < hi) { + p = network[j++]; + p[0] -= (a * (p[0] - b)) / alpharadbias; + p[1] -= (a * (p[1] - g)) / alpharadbias; + p[2] -= (a * (p[2] - r)) / alpharadbias; + } + + if 
(k > lo) { + p = network[k--]; + p[0] -= (a * (p[0] - b)) / alpharadbias; + p[1] -= (a * (p[1] - g)) / alpharadbias; + p[2] -= (a * (p[2] - r)) / alpharadbias; + } + } + } + + /* + Private Method: contest + + searches for biased BGR values + */ + function contest(b, g, r) { + /* + finds closest neuron (min dist) and updates freq + finds best neuron (min dist-bias) and returns position + for frequently chosen neurons, freq[i] is high and bias[i] is negative + bias[i] = gamma * ((1 / netsize) - freq[i]) + */ + + var bestd = ~(1 << 31); + var bestbiasd = bestd; + var bestpos = -1; + var bestbiaspos = bestpos; + + var i, n, dist, biasdist, betafreq; + for (i = 0; i < netsize; i++) { + n = network[i]; + + dist = Math.abs(n[0] - b) + Math.abs(n[1] - g) + Math.abs(n[2] - r); + if (dist < bestd) { + bestd = dist; + bestpos = i; + } + + biasdist = dist - ((bias[i]) >> (intbiasshift - netbiasshift)); + if (biasdist < bestbiasd) { + bestbiasd = biasdist; + bestbiaspos = i; + } + + betafreq = (freq[i] >> betashift); + freq[i] -= betafreq; + bias[i] += (betafreq << gammashift); + } + + freq[bestpos] += beta; + bias[bestpos] -= betagamma; + + return bestbiaspos; + } + + /* + Private Method: inxbuild + + sorts network and builds netindex[0..255] + */ + function inxbuild() { + var i, j, p, q, smallpos, smallval, previouscol = 0, startpos = 0; + for (i = 0; i < netsize; i++) { + p = network[i]; + smallpos = i; + smallval = p[1]; // index on g + // find smallest in i..netsize-1 + for (j = i + 1; j < netsize; j++) { + q = network[j]; + if (q[1] < smallval) { // index on g + smallpos = j; + smallval = q[1]; // index on g + } + } + q = network[smallpos]; + // swap p (i) and q (smallpos) entries + if (i != smallpos) { + j = q[0]; q[0] = p[0]; p[0] = j; + j = q[1]; q[1] = p[1]; p[1] = j; + j = q[2]; q[2] = p[2]; p[2] = j; + j = q[3]; q[3] = p[3]; p[3] = j; + } + // smallval entry is now in position i + + if (smallval != previouscol) { + netindex[previouscol] = (startpos + i) >> 1; + for (j = previouscol + 1; j < smallval; j++) + netindex[j] = i; + previouscol = smallval; + startpos = i; + } + } + netindex[previouscol] = (startpos + maxnetpos) >> 1; + for (j = previouscol + 1; j < 256; j++) + netindex[j] = maxnetpos; // really 256 + } + + /* + Private Method: inxsearch + + searches for BGR values 0..255 and returns a color index + */ + function inxsearch(b, g, r) { + var a, p, dist; + + var bestd = 1000; // biggest possible dist is 256*3 + var best = -1; + + var i = netindex[g]; // index on g + var j = i - 1; // start at netindex[g] and work outwards + + while ((i < netsize) || (j >= 0)) { + if (i < netsize) { + p = network[i]; + dist = p[1] - g; // inx key + if (dist >= bestd) i = netsize; // stop iter + else { + i++; + if (dist < 0) dist = -dist; + a = p[0] - b; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + a = p[2] - r; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + bestd = dist; + best = p[3]; + } + } + } + } + if (j >= 0) { + p = network[j]; + dist = g - p[1]; // inx key - reverse dif + if (dist >= bestd) j = -1; // stop iter + else { + j--; + if (dist < 0) dist = -dist; + a = p[0] - b; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + a = p[2] - r; if (a < 0) a = -a; + dist += a; + if (dist < bestd) { + bestd = dist; + best = p[3]; + } + } + } + } + } + + return best; + } + + /* + Private Method: learn + + "Main Learning Loop" + */ + function learn() { + var i; + + var lengthcount = pixels.length; + var alphadec = 30 + ((samplefac - 1) / 3); + var samplepixels = lengthcount / (3 * 
samplefac); + var delta = ~~(samplepixels / ncycles); + var alpha = initalpha; + var radius = initradius; + + var rad = radius >> radiusbiasshift; + + if (rad <= 1) rad = 0; + for (i = 0; i < rad; i++) + radpower[i] = alpha * (((rad * rad - i * i) * radbias) / (rad * rad)); + + var step; + if (lengthcount < minpicturebytes) { + samplefac = 1; + step = 3; + } else if ((lengthcount % prime1) !== 0) { + step = 3 * prime1; + } else if ((lengthcount % prime2) !== 0) { + step = 3 * prime2; + } else if ((lengthcount % prime3) !== 0) { + step = 3 * prime3; + } else { + step = 3 * prime4; + } + + var b, g, r, j; + var pix = 0; // current pixel + + i = 0; + while (i < samplepixels) { + b = (pixels[pix] & 0xff) << netbiasshift; + g = (pixels[pix + 1] & 0xff) << netbiasshift; + r = (pixels[pix + 2] & 0xff) << netbiasshift; + + j = contest(b, g, r); + + altersingle(alpha, j, b, g, r); + if (rad !== 0) alterneigh(rad, j, b, g, r); // alter neighbours + + pix += step; + if (pix >= lengthcount) pix -= lengthcount; + + i++; + + if (delta === 0) delta = 1; + if (i % delta === 0) { + alpha -= alpha / alphadec; + radius -= radius / radiusdec; + rad = radius >> radiusbiasshift; + + if (rad <= 1) rad = 0; + for (j = 0; j < rad; j++) + radpower[j] = alpha * (((rad * rad - j * j) * radbias) / (rad * rad)); + } + } + } + + /* + Method: buildColormap + + 1. initializes network + 2. trains it + 3. removes misconceptions + 4. builds colorindex + */ + function buildColormap() { + init(); + learn(); + unbiasnet(); + inxbuild(); + } + this.buildColormap = buildColormap; + + /* + Method: getColormap + + builds colormap from the index + + returns array in the format: + + > + > [r, g, b, r, g, b, r, g, b, ..] + > + */ + function getColormap() { + var map = []; + var index = []; + + for (var i = 0; i < netsize; i++) + index[network[i][3]] = i; + + var k = 0; + for (var l = 0; l < netsize; l++) { + var j = index[l]; + map[k++] = (network[j][0]); + map[k++] = (network[j][1]); + map[k++] = (network[j][2]); + } + return map; + } + this.getColormap = getColormap; + + /* + Method: lookupRGB + + looks for the closest *r*, *g*, *b* color in the map and + returns its index + */ + this.lookupRGB = inxsearch; +} + +module.exports = NeuQuant; diff --git a/vnc-over-gif.js b/vnc-over-gif.js index cb707e4..59a685d 100755 --- a/vnc-over-gif.js +++ b/vnc-over-gif.js @@ -2,23 +2,107 @@ var rfb = require('rfb2'); var GifStream = require('./lib/stream.js'); +var FRAME_RATE = 100; // minimum update interval + +// calculate update rectangle +function screenDiff(screen1, screen2, w, h) { + + var topLeft = [w, h]; + var bottomRight = [-1, -1]; + + var screen_idx; + for (var y = 0; y < h; ++y) { + for (var x = 0; x < w; ++x) { + screen_idx = (x + y*w)*3; + if (!( + screen1[screen_idx + 0] == screen2[screen_idx + 0] && + screen1[screen_idx + 1] == screen2[screen_idx + 1] && + screen1[screen_idx + 2] == screen2[screen_idx + 2] + )) { + if (x < topLeft[0]) + topLeft[0] = x; + if (y < topLeft[1]) + topLeft[1] = y; + if (x > bottomRight[0]) + bottomRight[0] = x; + if (y > bottomRight[1]) + bottomRight[1] = y; + } + } + } + + var rect = { + x: topLeft[0], + width: bottomRight[0] - topLeft[0], + y: topLeft[1], + height: bottomRight[1] - topLeft[1] + }; + + if (rect.width <= 0 || rect.height <= 0) + return null; + + var screen_idx, rect_idx; + rect.data = new Buffer(rect.width*rect.height*3); + for(var y=rect.y; y < rect.y + rect.height; ++y) { + for(var x=rect.x; x < rect.x + rect.width; ++x) { + screen_idx = (x + y*w)*3; + rect_idx = 
((y-rect.y)*rect.width + x - rect.x)*3; + rect.data[rect_idx + 0] = screen2[screen_idx + 0]; + rect.data[rect_idx + 1] = screen2[screen_idx + 1]; + rect.data[rect_idx + 2] = screen2[screen_idx + 2]; + } + } + return rect; +} + +// copy 32 bit rect to 24 bit screen +function drawRect(screen, w, h, rect) { + var screen_idx, rect_idx; + for(var y=rect.y; y < rect.y + rect.height; ++y) { + for(var x=rect.x; x < rect.x + rect.width; ++x) { + screen_idx = (x + y*w)*3; + rect_idx = ((y-rect.y)*rect.width + x - rect.x)*4; + screen[screen_idx + 0] = rect.data[rect_idx + 0]; + screen[screen_idx + 1] = rect.data[rect_idx + 1]; + screen[screen_idx + 2] = rect.data[rect_idx + 2]; + } + } +} + require('http').createServer(function(req, res) { var params = require('url').parse(req.url, true); if (params.pathname == '/screen.gif') { var r = rfb.createConnection(params.query); var gif; + var screenSent, screenCurrent; r.on('connect', function() { res.writeHead(200, { 'Content-Type': 'image/gif'}); gif = new GifStream(r.width, r.height); - //gif.pipe(res); - + screenSent = new Buffer(r.width*r.height*3); + screenCurrent = new Buffer(r.width*r.height*3); + + var lastUpdateTime = +Date.now(); + function sendUpdate() { + if (Date.now() - lastUpdateTime < FRAME_RATE) + return; + var rect = screenDiff(screenSent, screenCurrent, r.width, r.height); + if (rect) { + var data = gif.addFrame(rect.x, rect.y, rect.width, rect.height, rect.data); + console.log(data.length); + res.write(data); + } + screenCurrent.copy(screenSent); + lastUpdateTime = +Date.now(); + } + r.on('rect', function(rect) { - var data = gif.addFrame(rect.x, rect.y, rect.width, rect.height, rect.data); + drawRect(screenCurrent, r.width, r.height, rect); r.requestUpdate(true, 0, 0, r.width, r.height); - res.write(data); + sendUpdate(); }); + }); } else { res.writeHead(404);
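
Taken together, the two patches can also be exercised without a VNC server; below is a minimal sketch of driving lib/stream.js directly (the canvas size and pixel data are illustrative assumptions, and GifStream is not yet a real Node stream, as its own TODO notes):

    var GifStream = require('./lib/stream.js');

    var gif = new GifStream(4, 4);          // 4x4 GIF canvas
    var rgb = new Buffer(2 * 2 * 3);        // 2x2 patch, 3 bytes (r, g, b) per pixel
    rgb.fill(0xff);                         // solid white patch

    // Quantizes the patch with NeuQuant, encodes it as one frame at (1, 1) and
    // returns the GIF bytes produced so far (header + frame on the first call).
    var bytes = gif.addFrame(1, 1, 2, 2, rgb);

Over HTTP the flow in vnc-over-gif.js is the same, just rate limited: every rfb 'rect' update is painted into screenCurrent by drawRect(), and at most once every FRAME_RATE milliseconds sendUpdate() encodes the bounding box of pixels that differ from screenSent (via screenDiff()) and writes that single GIF frame to the response.
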