
Rename

y595705120 1 year ago
parent
commit
9a7c0d37a3

File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/data/eye-min.js


File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/data/eye.js


File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/data/face-min.js


File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/data/face.js


File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/data/mouth-min.js


File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/data/mouth.js


File diff suppressed because it is too large
+ 7 - 0
src/assets/tracking/build/tracking-min.js


+ 3111 - 0
src/assets/tracking/build/tracking.js

@@ -0,0 +1,3111 @@
+/**
+ * tracking - A modern approach for Computer Vision on the web.
+ * @author Eduardo Lundgren <edu@rdo.io>
+ * @version v1.1.3
+ * @link http://trackingjs.com
+ * @license BSD
+ */
+(function(window, undefined) {
+  window.tracking = window.tracking || {};
+
+  /**
+   * Inherit the prototype methods from one constructor into another.
+   *
+   * Usage:
+   * <pre>
+   * function ParentClass(a, b) { }
+   * ParentClass.prototype.foo = function(a) { }
+   *
+   * function ChildClass(a, b, c) {
+   *   ChildClass.base(this, 'constructor', a, b);
+   * }
+   * tracking.inherits(ChildClass, ParentClass);
+   *
+   * var child = new ChildClass('a', 'b', 'c');
+   * child.foo();
+   * </pre>
+   *
+   * @param {Function} childCtor Child class.
+   * @param {Function} parentCtor Parent class.
+   */
+  tracking.inherits = function(childCtor, parentCtor) {
+    function TempCtor() {
+    }
+    TempCtor.prototype = parentCtor.prototype;
+    childCtor.superClass_ = parentCtor.prototype;
+    childCtor.prototype = new TempCtor();
+    childCtor.prototype.constructor = childCtor;
+
+    /**
+     * Calls superclass constructor/method.
+     *
+     * This function is only available if you use tracking.inherits to express
+     * inheritance relationships between classes.
+     *
+     * @param {!object} me Should always be "this".
+     * @param {string} methodName The method name to call. Calling superclass
+     *     constructor can be done with the special string 'constructor'.
+     * @param {...*} var_args The arguments to pass to superclass
+     *     method/constructor.
+     * @return {*} The return value of the superclass method/constructor.
+     */
+    childCtor.base = function(me, methodName) {
+      var args = Array.prototype.slice.call(arguments, 2);
+      return parentCtor.prototype[methodName].apply(me, args);
+    };
+  };
+
+  /**
+   * Captures the user camera when tracking a video element and sets its
+   * source to the camera stream.
+   * @param {HTMLVideoElement} element Video element to track.
+   * @param {object} opt_options Optional configuration to the tracker.
+   */
+  tracking.initUserMedia_ = function(element, opt_options) {
+    window.navigator.mediaDevices.getUserMedia({
+      video: true,
+      audio: (opt_options && opt_options.audio) ? true : false,
+    }).then(function(stream) {
+      element.srcObject = stream;
+    }).catch(function(err) {
+      throw Error('Cannot capture user camera.');
+    });
+  };
+
+  /**
+   * Tests whether the object is a dom node.
+   * @param {object} o Object to be tested.
+   * @return {boolean} True if the object is a dom node.
+   */
+  tracking.isNode = function(o) {
+    return o.nodeType || this.isWindow(o);
+  };
+
+  /**
+   * Tests whether the object is the `window` object.
+   * @param {object} o Object to be tested.
+   * @return {boolean} True if the object is the `window` object.
+   */
+  tracking.isWindow = function(o) {
+    return !!(o && o.alert && o.document);
+  };
+
+  /**
+   * Selects a dom node from a CSS3 selector using `document.querySelector`.
+   * @param {string} selector
+   * @param {object} opt_element The root element for the query. When not
+   *     specified `document` is used as root element.
+   * @return {HTMLElement} The first dom element that matches to the selector.
+   *     If not found, returns `null`.
+   */
+  tracking.one = function(selector, opt_element) {
+    if (this.isNode(selector)) {
+      return selector;
+    }
+    return (opt_element || document).querySelector(selector);
+  };
+
+  /**
+   * Tracks a canvas, image or video element based on the specified `tracker`
+   * instance. This method extracts the pixel information of the input element
+   * to pass to the `tracker` instance. When tracking a video, the
+   * `tracker.track(pixels, width, height)` call runs inside a
+   * `requestAnimationFrame` loop in order to track all video frames.
+   *
+   * Example:
+   * var tracker = new tracking.ColorTracker();
+   *
+   * tracking.track('#video', tracker);
+   * or
+   * tracking.track('#video', tracker, { camera: true });
+   *
+   * tracker.on('track', function(event) {
+   *   // console.log(event.data[0].x, event.data[0].y)
+   * });
+   *
+   * @param {HTMLElement} element The element to track, canvas, image or
+   *     video.
+   * @param {tracking.Tracker} tracker The tracker instance used to track the
+   *     element.
+   * @param {object} opt_options Optional configuration to the tracker.
+   */
+  tracking.track = function(element, tracker, opt_options) {
+    element = tracking.one(element);
+    if (!element) {
+      throw new Error('Element not found, try a different element or selector.');
+    }
+    if (!tracker) {
+      throw new Error('Tracker not specified, try `tracking.track(element, new tracking.FaceTracker())`.');
+    }
+
+    switch (element.nodeName.toLowerCase()) {
+      case 'canvas':
+        return this.trackCanvas_(element, tracker, opt_options);
+      case 'img':
+        return this.trackImg_(element, tracker, opt_options);
+      case 'video':
+        if (opt_options) {
+          if (opt_options.camera) {
+            this.initUserMedia_(element, opt_options);
+          }
+        }
+        return this.trackVideo_(element, tracker, opt_options);
+      default:
+        throw new Error('Element not supported, try in a canvas, img, or video.');
+    }
+  };
+
+  /**
+   * Tracks a canvas element based on the specified `tracker` instance and
+   * returns a `TrackerTask` for this track.
+   * @param {HTMLCanvasElement} element Canvas element to track.
+   * @param {tracking.Tracker} tracker The tracker instance used to track the
+   *     element.
+   * @param {object} opt_options Optional configuration to the tracker.
+   * @return {tracking.TrackerTask}
+   * @private
+   */
+  tracking.trackCanvas_ = function(element, tracker) {
+    var self = this;
+    var task = new tracking.TrackerTask(tracker);
+    task.on('run', function() {
+      self.trackCanvasInternal_(element, tracker);
+    });
+    return task.run();
+  };
+
+  /**
+   * Tracks a canvas element based on the specified `tracker` instance. This
+   * method extracts the pixel information of the input element to pass to the
+   * `tracker` instance.
+   * @param {HTMLCanvasElement} element Canvas element to track.
+   * @param {tracking.Tracker} tracker The tracker instance used to track the
+   *     element.
+   * @param {object} opt_options Optional configuration to the tracker.
+   * @private
+   */
+  tracking.trackCanvasInternal_ = function(element, tracker) {
+    var width = element.width;
+    var height = element.height;
+    var context = element.getContext('2d');
+    var imageData = context.getImageData(0, 0, width, height);
+    tracker.track(imageData.data, width, height);
+  };
+
+  /**
+   * Tracks an image element based on the specified `tracker` instance. This
+   * method extracts the pixel information of the input element to pass to the
+   * `tracker` instance.
+   * @param {HTMLImageElement} element Image element to track.
+   * @param {tracking.Tracker} tracker The tracker instance used to track the
+   *     element.
+   * @param {object} opt_options Optional configuration to the tracker.
+   * @private
+   */
+  tracking.trackImg_ = function(element, tracker) {
+    var width = element.width;
+    var height = element.height;
+    var canvas = document.createElement('canvas');
+
+    canvas.width = width;
+    canvas.height = height;
+
+    var task = new tracking.TrackerTask(tracker);
+    task.on('run', function() {
+      tracking.Canvas.loadImage(canvas, element.src, 0, 0, width, height, function() {
+        tracking.trackCanvasInternal_(canvas, tracker);
+      });
+    });
+    return task.run();
+  };
+
+  /**
+   * Tracks a video element based on the specified `tracker` instance. This
+   * method extracts the pixel information of the input element to pass to the
+   * `tracker` instance. The `tracker.track(pixels, width, height)` call runs
+   * inside a `requestAnimationFrame` loop in order to track all video frames.
+   * @param {HTMLVideoElement} element Video element to track.
+   * @param {tracking.Tracker} tracker The tracker instance used to track the
+   *     element.
+   * @param {object} opt_options Optional configuration to the tracker.
+   * @private
+   */
+  tracking.trackVideo_ = function(element, tracker) {
+    var canvas = document.createElement('canvas');
+    var context = canvas.getContext('2d');
+    var width;
+    var height;
+
+    var resizeCanvas_ = function() {
+      width = element.offsetWidth;
+      height = element.offsetHeight;
+      canvas.width = width;
+      canvas.height = height;
+    };
+    resizeCanvas_();
+    element.addEventListener('resize', resizeCanvas_);
+
+    var requestId;
+    var requestAnimationFrame_ = function() {
+      requestId = window.requestAnimationFrame(function() {
+        if (element.readyState === element.HAVE_ENOUGH_DATA) {
+          try {
+            // Firefox v~30.0 gets confused with the video readyState firing an
+            // erroneous HAVE_ENOUGH_DATA just before HAVE_CURRENT_DATA state,
+            // hence keep trying to read it until resolved.
+            context.drawImage(element, 0, 0, width, height);
+          } catch (err) {}
+          tracking.trackCanvasInternal_(canvas, tracker);
+        }
+        requestAnimationFrame_();
+      });
+    };
+
+    var task = new tracking.TrackerTask(tracker);
+    task.on('stop', function() {
+      window.cancelAnimationFrame(requestId);
+    });
+    task.on('run', function() {
+      requestAnimationFrame_();
+    });
+    return task.run();
+  };
+
+  // Browser polyfills
+  //===================
+
+  if (!window.URL) {
+    window.URL = window.URL || window.webkitURL || window.msURL || window.oURL;
+  }
+
+  if (!navigator.getUserMedia) {
+    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
+    navigator.mozGetUserMedia || navigator.msGetUserMedia;
+  }
+}(window));
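+
+// Usage sketch (not part of the library source): assuming a <video id="video">
+// element on the page and the ColorTracker defined later in this file, a
+// typical call wires the tracker, starts the camera, and listens for results.
+// The element id and color are illustrative.
+//
+//   var tracker = new tracking.ColorTracker(['magenta']);
+//   var task = tracking.track('#video', tracker, { camera: true });
+//   tracker.on('track', function(event) {
+//     event.data.forEach(function(rect) {
+//       console.log(rect.x, rect.y, rect.width, rect.height);
+//     });
+//   });
+//   task.stop(); // cancels the requestAnimationFrame loop
+//   task.run();  // resumes tracking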
+
+(function() {
+  /**
+   * EventEmitter utility.
+   * @constructor
+   */
+  tracking.EventEmitter = function() {};
+
+  /**
+   * Holds event listeners scoped by event type.
+   * @type {object}
+   * @private
+   */
+  tracking.EventEmitter.prototype.events_ = null;
+
+  /**
+   * Adds a listener to the end of the listeners array for the specified event.
+   * @param {string} event
+   * @param {function} listener
+   * @return {object} Returns emitter, so calls can be chained.
+   */
+  tracking.EventEmitter.prototype.addListener = function(event, listener) {
+    if (typeof listener !== 'function') {
+      throw new TypeError('Listener must be a function');
+    }
+    if (!this.events_) {
+      this.events_ = {};
+    }
+
+    this.emit('newListener', event, listener);
+
+    if (!this.events_[event]) {
+      this.events_[event] = [];
+    }
+
+    this.events_[event].push(listener);
+
+    return this;
+  };
+
+  /**
+   * Returns an array of listeners for the specified event.
+   * @param {string} event
+   * @return {array} Array of listeners.
+   */
+  tracking.EventEmitter.prototype.listeners = function(event) {
+    return this.events_ && this.events_[event];
+  };
+
+  /**
+   * Execute each of the listeners in order with the supplied arguments.
+   * @param {string} event
+   * @param {*} opt_args [arg1], [arg2], [...]
+   * @return {boolean} Returns true if event had listeners, false otherwise.
+   */
+  tracking.EventEmitter.prototype.emit = function(event) {
+    var listeners = this.listeners(event);
+    if (listeners) {
+      var args = Array.prototype.slice.call(arguments, 1);
+      for (var i = 0; i < listeners.length; i++) {
+        if (listeners[i]) {
+          listeners[i].apply(this, args);
+        }
+      }
+      return true;
+    }
+    return false;
+  };
+
+  /**
+   * Adds a listener to the end of the listeners array for the specified event.
+   * @param {string} event
+   * @param {function} listener
+   * @return {object} Returns emitter, so calls can be chained.
+   */
+  tracking.EventEmitter.prototype.on = tracking.EventEmitter.prototype.addListener;
+
+  /**
+   * Adds a one-time listener for the event. This listener is invoked only the
+   * next time the event is fired, after which it is removed.
+   * @param {string} event
+   * @param {function} listener
+   * @return {object} Returns emitter, so calls can be chained.
+   */
+  tracking.EventEmitter.prototype.once = function(event, listener) {
+    var self = this;
+    self.on(event, function handlerInternal() {
+      self.removeListener(event, handlerInternal);
+      listener.apply(this, arguments);
+    });
+  };
+
+  /**
+   * Removes all listeners, or those of the specified event. It's not a good
+   * idea to remove listeners that were added elsewhere in the code,
+   * especially when it's on an emitter that you didn't create.
+   * @param {string} opt_event The event to remove listeners for; when omitted
+   *     all listeners are removed.
+   * @return {object} Returns emitter, so calls can be chained.
+   */
+  tracking.EventEmitter.prototype.removeAllListeners = function(opt_event) {
+    if (!this.events_) {
+      return this;
+    }
+    if (opt_event) {
+      delete this.events_[opt_event];
+    } else {
+      delete this.events_;
+    }
+    return this;
+  };
+
+  /**
+   * Remove a listener from the listener array for the specified event.
+   * Caution: changes array indices in the listener array behind the listener.
+   * @param {string} event
+   * @param {function} listener
+   * @return {object} Returns emitter, so calls can be chained.
+   */
+  tracking.EventEmitter.prototype.removeListener = function(event, listener) {
+    if (typeof listener !== 'function') {
+      throw new TypeError('Listener must be a function');
+    }
+    if (!this.events_) {
+      return this;
+    }
+
+    var listeners = this.listeners(event);
+    if (Array.isArray(listeners)) {
+      var i = listeners.indexOf(listener);
+      if (i < 0) {
+        return this;
+      }
+      listeners.splice(i, 1);
+    }
+
+    return this;
+  };
+
+  /**
+   * By default EventEmitters will print a warning if more than 10 listeners
+   * are added for a particular event. This is a useful default which helps
+   * finding memory leaks. Obviously not all Emitters should be limited to 10.
+   * This function allows that to be increased. Set to zero for unlimited.
+   * Note: not implemented by this emitter; calling it throws.
+   * @param {number} n The maximum number of listeners.
+   */
+  tracking.EventEmitter.prototype.setMaxListeners = function() {
+    throw new Error('Not implemented');
+  };
+
+}());
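+
+// Usage sketch (illustrative, not part of the library source): any constructor
+// wired to tracking.EventEmitter through tracking.inherits gains on/once/emit.
+//
+//   function MyEmitter() {}
+//   tracking.inherits(MyEmitter, tracking.EventEmitter);
+//
+//   var emitter = new MyEmitter();
+//   emitter.on('ready', function(msg) { console.log('on:', msg); });
+//   emitter.once('ready', function(msg) { console.log('once:', msg); });
+//   emitter.emit('ready', 'go');  // fires both handlers
+//   emitter.emit('ready', 'go');  // fires only the `on` handler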
+
+(function() {
+  /**
+   * Canvas utility.
+   * @static
+   * @constructor
+   */
+  tracking.Canvas = {};
+
+  /**
+   * Loads an image source into the canvas.
+   * @param {HTMLCanvasElement} canvas The canvas dom element.
+   * @param {string} src The image source.
+   * @param {number} x The canvas horizontal coordinate to load the image.
+   * @param {number} y The canvas vertical coordinate to load the image.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {function} opt_callback Callback that fires when the image is loaded
+   *     into the canvas.
+   * @static
+   */
+  tracking.Canvas.loadImage = function(canvas, src, x, y, width, height, opt_callback) {
+    var instance = this;
+    var img = new window.Image();
+    img.crossOrigin = '*';
+    img.onload = function() {
+      var context = canvas.getContext('2d');
+      canvas.width = width;
+      canvas.height = height;
+      context.drawImage(img, x, y, width, height);
+      if (opt_callback) {
+        opt_callback.call(instance);
+      }
+      img = null;
+    };
+    img.src = src;
+  };
+}());
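+
+// Usage sketch (the file name and dimensions are illustrative): loads an image
+// into a scratch canvas and reads its pixels back once the callback fires.
+//
+//   var canvas = document.createElement('canvas');
+//   tracking.Canvas.loadImage(canvas, 'photo.png', 0, 0, 320, 240, function() {
+//     var data = canvas.getContext('2d').getImageData(0, 0, 320, 240).data;
+//     // data is the [r,g,b,a,...] array the trackers consume.
+//   });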
+
+(function() {
+  /**
+   * DisjointSet utility with path compression. Some applications involve
+   * grouping n distinct objects into a collection of disjoint sets. Two
+   * important operations are then finding which set a given object belongs to
+   * and uniting the two sets. A disjoint set data structure maintains a
+   * collection S={ S1 , S2 ,..., Sk } of disjoint dynamic sets. Each set is
+   * identified by a representative, which usually is a member in the set.
+   * @static
+   * @constructor
+   */
+  tracking.DisjointSet = function(length) {
+    if (length === undefined) {
+      throw new Error('DisjointSet length not specified.');
+    }
+    this.length = length;
+    this.parent = new Uint32Array(length);
+    for (var i = 0; i < length; i++) {
+      this.parent[i] = i;
+    }
+  };
+
+  /**
+   * Holds the length of the internal set.
+   * @type {number}
+   */
+  tracking.DisjointSet.prototype.length = null;
+
+  /**
+   * Holds the set containing the representative values.
+   * @type {Array.<number>}
+   */
+  tracking.DisjointSet.prototype.parent = null;
+
+  /**
+   * Finds a pointer to the representative of the set containing i.
+   * @param {number} i
+   * @return {number} The representative set of i.
+   */
+  tracking.DisjointSet.prototype.find = function(i) {
+    if (this.parent[i] === i) {
+      return i;
+    } else {
+      return (this.parent[i] = this.find(this.parent[i]));
+    }
+  };
+
+  /**
+   * Unites two dynamic sets containing objects i and j, say Si and Sj, into
+   * a new set Si ∪ Sj, assuming that Si ∩ Sj = ∅.
+   * @param {number} i
+   * @param {number} j
+   */
+  tracking.DisjointSet.prototype.union = function(i, j) {
+    var iRepresentative = this.find(i);
+    var jRepresentative = this.find(j);
+    this.parent[iRepresentative] = jRepresentative;
+  };
+
+}());
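+
+// Usage sketch: five singleton sets, two unions, then representative lookups.
+//
+//   var set = new tracking.DisjointSet(5); // {0} {1} {2} {3} {4}
+//   set.union(0, 1);
+//   set.union(1, 2);                       // {0,1,2} {3} {4}
+//   set.find(0) === set.find(2);           // true, same representative
+//   set.find(3) === set.find(4);           // false, still disjoint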
+
+(function() {
+  /**
+   * Image utility.
+   * @static
+   * @constructor
+   */
+  tracking.Image = {};
+
+  /**
+   * Computes gaussian blur. Adapted from
+   * https://github.com/kig/canvasfilters.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {number} diameter Gaussian blur diameter, must be greater than 1.
+   * @return {array} The blurred pixels in a linear [r,g,b,a,...] array.
+   */
+  tracking.Image.blur = function(pixels, width, height, diameter) {
+    diameter = Math.abs(diameter);
+    if (diameter <= 1) {
+      throw new Error('Diameter should be greater than 1.');
+    }
+    var radius = diameter / 2;
+    var len = Math.ceil(diameter) + (1 - (Math.ceil(diameter) % 2));
+    var weights = new Float32Array(len);
+    var rho = (radius + 0.5) / 3;
+    var rhoSq = rho * rho;
+    var gaussianFactor = 1 / Math.sqrt(2 * Math.PI * rhoSq);
+    var rhoFactor = -1 / (2 * rho * rho);
+    var wsum = 0;
+    var middle = Math.floor(len / 2);
+    for (var i = 0; i < len; i++) {
+      var x = i - middle;
+      var gx = gaussianFactor * Math.exp(x * x * rhoFactor);
+      weights[i] = gx;
+      wsum += gx;
+    }
+    for (var j = 0; j < weights.length; j++) {
+      weights[j] /= wsum;
+    }
+    return this.separableConvolve(pixels, width, height, weights, weights, false);
+  };
+
+  /**
+   * Computes the integral image for summed, squared, rotated and sobel pixels.
+   * @param {array} pixels The pixels in a linear [r,g,b,a,...] array to loop
+   *     through.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {array} opt_integralImage Empty array of size `width * height` to
+   *     be filled with the integral image values. If not specified compute sum
+   *     values will be skipped.
+   * @param {array} opt_integralImageSquare Empty array of size `width *
+   *     height` to be filled with the integral image squared values. If not
+   *     specified compute squared values will be skipped.
+   * @param {array} opt_tiltedIntegralImage Empty array of size `width *
+   *     height` to be filled with the rotated integral image values. If not
+   *     specified compute sum values will be skipped.
+   * @param {array} opt_integralImageSobel Empty array of size `width *
+   *     height` to be filled with the integral image of sobel values. If not
+   *     specified compute sobel filtering will be skipped.
+   * @static
+   */
+  tracking.Image.computeIntegralImage = function(pixels, width, height, opt_integralImage, opt_integralImageSquare, opt_tiltedIntegralImage, opt_integralImageSobel) {
+    if (arguments.length < 4) {
+      throw new Error('You should specify at least one output array in the order: sum, square, tilted, sobel.');
+    }
+    var pixelsSobel;
+    if (opt_integralImageSobel) {
+      pixelsSobel = tracking.Image.sobel(pixels, width, height);
+    }
+    for (var i = 0; i < height; i++) {
+      for (var j = 0; j < width; j++) {
+        var w = i * width * 4 + j * 4;
+        var pixel = ~~(pixels[w] * 0.299 + pixels[w + 1] * 0.587 + pixels[w + 2] * 0.114);
+        if (opt_integralImage) {
+          this.computePixelValueSAT_(opt_integralImage, width, i, j, pixel);
+        }
+        if (opt_integralImageSquare) {
+          this.computePixelValueSAT_(opt_integralImageSquare, width, i, j, pixel * pixel);
+        }
+        if (opt_tiltedIntegralImage) {
+          var w1 = w - width * 4;
+          var pixelAbove = ~~(pixels[w1] * 0.299 + pixels[w1 + 1] * 0.587 + pixels[w1 + 2] * 0.114);
+          this.computePixelValueRSAT_(opt_tiltedIntegralImage, width, i, j, pixel, pixelAbove || 0);
+        }
+        if (opt_integralImageSobel) {
+          this.computePixelValueSAT_(opt_integralImageSobel, width, i, j, pixelsSobel[w]);
+        }
+      }
+    }
+  };
+
+  /**
+   * Helper method to compute the rotated summed area table (RSAT) by the
+   * formula:
+   *
+   * RSAT(x, y) = RSAT(x-1, y-1) + RSAT(x+1, y-1) - RSAT(x, y-2) + I(x, y) + I(x, y-1)
+   *
+   * @param {array} RSAT Array of size `width * height` to be filled with the
+   *     rotated integral image values.
+   * @param {number} width The image width.
+   * @param {number} i Vertical position of the pixel to be evaluated.
+   * @param {number} j Horizontal position of the pixel to be evaluated.
+   * @param {number} pixel Pixel value to be added to the integral image.
+   * @param {number} pixelAbove Pixel value one row above the evaluated pixel.
+   * @static
+   * @private
+   */
+  tracking.Image.computePixelValueRSAT_ = function(RSAT, width, i, j, pixel, pixelAbove) {
+    var w = i * width + j;
+    RSAT[w] = (RSAT[w - width - 1] || 0) + (RSAT[w - width + 1] || 0) - (RSAT[w - width - width] || 0) + pixel + pixelAbove;
+  };
+
+  /**
+   * Helper method to compute the summed area table (SAT) by the formula:
+   *
+   * SAT(x, y) = SAT(x, y-1) + SAT(x-1, y) + I(x, y) - SAT(x-1, y-1)
+   *
+   * @param {array} SAT Array of size `width * height` to be filled with the
+   *     integral image values.
+   * @param {number} width The image width.
+   * @param {number} i Vertical position of the pixel to be evaluated.
+   * @param {number} j Horizontal position of the pixel to be evaluated.
+   * @param {number} pixel Pixel value to be added to the integral image.
+   * @static
+   * @private
+   */
+  tracking.Image.computePixelValueSAT_ = function(SAT, width, i, j, pixel) {
+    var w = i * width + j;
+    SAT[w] = (SAT[w - width] || 0) + (SAT[w - 1] || 0) + pixel - (SAT[w - width - 1] || 0);
+  };
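+
+  // Once the SAT is filled, the sum over any block can be recovered in
+  // constant time from its four corner indices A (top-left), B (top-right),
+  // D (bottom-left) and C (bottom-right):
+  //
+  //   sum(block) = SAT[A] - SAT[B] - SAT[D] + SAT[C]
+  //
+  // which is exactly how tracking.ViolaJones evaluates blocks further below.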
+
+  /**
+   * Converts a color from a colorspace based on an RGB color model to a
+   * grayscale representation of its luminance. The coefficients represent the
+   * measured intensity perception of typical trichromat humans, in
+   * particular, human vision is most sensitive to green and least sensitive
+   * to blue.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {boolean} fillRGBA If the result should fill all RGBA values with the gray scale
+   *  values, instead of returning a single value per pixel.
+   * @return {Uint8ClampedArray} The grayscale pixels in a linear array
+   *     ([p,p,p,a,...] if fillRGBA is true and [p1, p2, p3, ...] if fillRGBA
+   *     is false).
+   * @static
+   */
+  tracking.Image.grayscale = function(pixels, width, height, fillRGBA) {
+    var gray = new Uint8ClampedArray(fillRGBA ? pixels.length : pixels.length >> 2);
+    var p = 0;
+    var w = 0;
+    for (var i = 0; i < height; i++) {
+      for (var j = 0; j < width; j++) {
+        var value = pixels[w] * 0.299 + pixels[w + 1] * 0.587 + pixels[w + 2] * 0.114;
+        gray[p++] = value;
+
+        if (fillRGBA) {
+          gray[p++] = value;
+          gray[p++] = value;
+          gray[p++] = pixels[w + 3];
+        }
+
+        w += 4;
+      }
+    }
+    return gray;
+  };
+
+  /**
+   * Fast horizontal separable convolution. A point spread function (PSF) is
+   * said to be separable if it can be broken into two one-dimensional
+   * signals: a vertical and a horizontal projection. The convolution is
+   * performed by sliding the kernel over the image, generally starting at the
+   * top left corner, so as to move the kernel through all the positions where
+   * the kernel fits entirely within the boundaries of the image. Adapted from
+   * https://github.com/kig/canvasfilters.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {array} weightsVector The weighting vector, e.g [-1,0,1].
+   * @param {number} opaque
+   * @return {array} The convoluted pixels in a linear [r,g,b,a,...] array.
+   */
+  tracking.Image.horizontalConvolve = function(pixels, width, height, weightsVector, opaque) {
+    var side = weightsVector.length;
+    var halfSide = Math.floor(side / 2);
+    var output = new Float32Array(width * height * 4);
+    var alphaFac = opaque ? 1 : 0;
+
+    for (var y = 0; y < height; y++) {
+      for (var x = 0; x < width; x++) {
+        var sy = y;
+        var sx = x;
+        var offset = (y * width + x) * 4;
+        var r = 0;
+        var g = 0;
+        var b = 0;
+        var a = 0;
+        for (var cx = 0; cx < side; cx++) {
+          var scy = sy;
+          var scx = Math.min(width - 1, Math.max(0, sx + cx - halfSide));
+          var poffset = (scy * width + scx) * 4;
+          var wt = weightsVector[cx];
+          r += pixels[poffset] * wt;
+          g += pixels[poffset + 1] * wt;
+          b += pixels[poffset + 2] * wt;
+          a += pixels[poffset + 3] * wt;
+        }
+        output[offset] = r;
+        output[offset + 1] = g;
+        output[offset + 2] = b;
+        output[offset + 3] = a + alphaFac * (255 - a);
+      }
+    }
+    return output;
+  };
+
+  /**
+   * Fast vertical separable convolution. A point spread function (PSF) is
+   * said to be separable if it can be broken into two one-dimensional
+   * signals: a vertical and a horizontal projection. The convolution is
+   * performed by sliding the kernel over the image, generally starting at the
+   * top left corner, so as to move the kernel through all the positions where
+   * the kernel fits entirely within the boundaries of the image. Adapted from
+   * https://github.com/kig/canvasfilters.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {array} weightsVector The weighting vector, e.g [-1,0,1].
+   * @param {number} opaque
+   * @return {array} The convoluted pixels in a linear [r,g,b,a,...] array.
+   */
+  tracking.Image.verticalConvolve = function(pixels, width, height, weightsVector, opaque) {
+    var side = weightsVector.length;
+    var halfSide = Math.floor(side / 2);
+    var output = new Float32Array(width * height * 4);
+    var alphaFac = opaque ? 1 : 0;
+
+    for (var y = 0; y < height; y++) {
+      for (var x = 0; x < width; x++) {
+        var sy = y;
+        var sx = x;
+        var offset = (y * width + x) * 4;
+        var r = 0;
+        var g = 0;
+        var b = 0;
+        var a = 0;
+        for (var cy = 0; cy < side; cy++) {
+          var scy = Math.min(height - 1, Math.max(0, sy + cy - halfSide));
+          var scx = sx;
+          var poffset = (scy * width + scx) * 4;
+          var wt = weightsVector[cy];
+          r += pixels[poffset] * wt;
+          g += pixels[poffset + 1] * wt;
+          b += pixels[poffset + 2] * wt;
+          a += pixels[poffset + 3] * wt;
+        }
+        output[offset] = r;
+        output[offset + 1] = g;
+        output[offset + 2] = b;
+        output[offset + 3] = a + alphaFac * (255 - a);
+      }
+    }
+    return output;
+  };
+
+  /**
+   * Fast separable convolution. A point spread function (PSF) is said to be
+   * separable if it can be broken into two one-dimensional signals: a
+   * vertical and a horizontal projection. The convolution is performed by
+   * sliding the kernel over the image, generally starting at the top left
+   * corner, so as to move the kernel through all the positions where the
+   * kernel fits entirely within the boundaries of the image. Adapted from
+   * https://github.com/kig/canvasfilters.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {array} horizWeights The horizontal weighting vector, e.g [-1,0,1].
+   * @param {array} vertWeights The vertical weighting vector, e.g [-1,0,1].
+   * @param {number} opaque
+   * @return {array} The convoluted pixels in a linear [r,g,b,a,...] array.
+   */
+  tracking.Image.separableConvolve = function(pixels, width, height, horizWeights, vertWeights, opaque) {
+    var vertical = this.verticalConvolve(pixels, width, height, vertWeights, opaque);
+    return this.horizontalConvolve(vertical, width, height, horizWeights, opaque);
+  };
+
+  /**
+   * Compute image edges using Sobel operator. Computes the vertical and
+   * horizontal gradients of the image and combines the computed images to
+   * find edges in the image. The way we implement the Sobel filter here is by
+   * first grayscaling the image, then taking the horizontal and vertical
+   * gradients and finally combining the gradient images to make up the final
+   * image. Adapted from https://github.com/kig/canvasfilters.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @return {array} The edge pixels in a linear [r,g,b,a,...] array.
+   */
+  tracking.Image.sobel = function(pixels, width, height) {
+    pixels = this.grayscale(pixels, width, height, true);
+    var output = new Float32Array(width * height * 4);
+    var sobelSignVector = new Float32Array([-1, 0, 1]);
+    var sobelScaleVector = new Float32Array([1, 2, 1]);
+    var vertical = this.separableConvolve(pixels, width, height, sobelSignVector, sobelScaleVector);
+    var horizontal = this.separableConvolve(pixels, width, height, sobelScaleVector, sobelSignVector);
+
+    for (var i = 0; i < output.length; i += 4) {
+      var v = vertical[i];
+      var h = horizontal[i];
+      var p = Math.sqrt(h * h + v * v);
+      output[i] = p;
+      output[i + 1] = p;
+      output[i + 2] = p;
+      output[i + 3] = 255;
+    }
+
+    return output;
+  };
+
+  /**
+   * Equalizes the histogram of a grayscale image, normalizing the
+   * brightness and increasing the contrast of the image.
+   * @param {pixels} pixels The grayscale pixels in a linear array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @return {array} The equalized grayscale pixels in a linear array.
+   */
+  tracking.Image.equalizeHist = function(pixels, width, height) {
+    var equalized = new Uint8ClampedArray(pixels.length);
+
+    // Count the occurrences of each gray level.
+    var histogram = new Array(256);
+    for (var i = 0; i < 256; i++) {
+      histogram[i] = 0;
+    }
+    for (var p = 0; p < pixels.length; p++) {
+      histogram[pixels[p]]++;
+    }
+
+    // Build the cumulative distribution function. Start at 1 so the first
+    // bin is not added to itself.
+    var prev = histogram[0];
+    for (var h = 1; h < 256; h++) {
+      histogram[h] += prev;
+      prev = histogram[h];
+    }
+
+    // Map each pixel through the normalized CDF.
+    var norm = 255 / pixels.length;
+    for (var w = 0; w < pixels.length; w++) {
+      equalized[w] = (histogram[pixels[w]] * norm + 0.5) | 0;
+    }
+
+    return equalized;
+  };
+
+}());
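+
+// Usage sketch (canvas and dimensions are illustrative): the Image helpers
+// compose over a raw RGBA pixel array read from a canvas.
+//
+//   var ctx = canvas.getContext('2d');
+//   var imageData = ctx.getImageData(0, 0, width, height);
+//   var blurred = tracking.Image.blur(imageData.data, width, height, 3);
+//   var edges = tracking.Image.sobel(imageData.data, width, height);
+//   var gray = tracking.Image.grayscale(imageData.data, width, height, false);
+//   var equalized = tracking.Image.equalizeHist(gray, width, height);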
+
+(function() {
+  /**
+   * ViolaJones utility.
+   * @static
+   * @constructor
+   */
+  tracking.ViolaJones = {};
+
+  /**
+   * Holds the minimum area of intersection that defines when a rectangle is
+   * from the same group. Often when a face is matched multiple rectangles are
+   * classified as possible rectangles to represent the face; when they
+   * intersect they are grouped as one face.
+   * @type {number}
+   * @default 0.5
+   * @static
+   */
+  tracking.ViolaJones.REGIONS_OVERLAP = 0.5;
+
+  /**
+   * Holds the HAAR cascade classifiers converted from OpenCV training.
+   * @type {array}
+   * @static
+   */
+  tracking.ViolaJones.classifiers = {};
+
+  /**
+   * Detects through the HAAR cascade data rectangles matches.
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {number} initialScale The initial scale to start the block
+   *     scaling.
+   * @param {number} scaleFactor The scale factor to scale the feature block.
+   * @param {number} stepSize The block step size.
+   * @param {number} edgesDensity Percentage density of edges inside the
+   *     classifier block. Value from [0.0, 1.0], defaults to 0.2. If specified,
+   *     edge detection will be applied to the image to prune dead areas,
+   *     which can significantly improve performance.
+   * @param {array} data The HAAR cascade data.
+   * @return {array} Found rectangles.
+   * @static
+   */
+  tracking.ViolaJones.detect = function(pixels, width, height, initialScale, scaleFactor, stepSize, edgesDensity, data) {
+    var total = 0;
+    var rects = [];
+    var integralImage = new Int32Array(width * height);
+    var integralImageSquare = new Int32Array(width * height);
+    var tiltedIntegralImage = new Int32Array(width * height);
+
+    var integralImageSobel;
+    if (edgesDensity > 0) {
+      integralImageSobel = new Int32Array(width * height);
+    }
+
+    tracking.Image.computeIntegralImage(pixels, width, height, integralImage, integralImageSquare, tiltedIntegralImage, integralImageSobel);
+
+    var minWidth = data[0];
+    var minHeight = data[1];
+    var scale = initialScale * scaleFactor;
+    var blockWidth = (scale * minWidth) | 0;
+    var blockHeight = (scale * minHeight) | 0;
+
+    while (blockWidth < width && blockHeight < height) {
+      var step = (scale * stepSize + 0.5) | 0;
+      for (var i = 0; i < (height - blockHeight); i += step) {
+        for (var j = 0; j < (width - blockWidth); j += step) {
+
+          if (edgesDensity > 0) {
+            if (this.isTriviallyExcluded(edgesDensity, integralImageSobel, i, j, width, blockWidth, blockHeight)) {
+              continue;
+            }
+          }
+
+          if (this.evalStages_(data, integralImage, integralImageSquare, tiltedIntegralImage, i, j, width, blockWidth, blockHeight, scale)) {
+            rects[total++] = {
+              width: blockWidth,
+              height: blockHeight,
+              x: j,
+              y: i
+            };
+          }
+        }
+      }
+
+      scale *= scaleFactor;
+      blockWidth = (scale * minWidth) | 0;
+      blockHeight = (scale * minHeight) | 0;
+    }
+    return this.mergeRectangles_(rects);
+  };
+
+  /**
+   * Fast check to test whether the edges density inside the block is greater
+   * than a threshold; if true, the stages are tested. This can significantly
+   * improve performance.
+   * @param {number} edgesDensity Percentage density edges inside the
+   *     classifier block.
+   * @param {array} integralImageSobel The integral image of a sobel image.
+   * @param {number} i Vertical position of the pixel to be evaluated.
+   * @param {number} j Horizontal position of the pixel to be evaluated.
+   * @param {number} width The image width.
+   * @return {boolean} True if the block at position i,j can be skipped,
+   *     false otherwise.
+   * @static
+   * @protected
+   */
+  tracking.ViolaJones.isTriviallyExcluded = function(edgesDensity, integralImageSobel, i, j, width, blockWidth, blockHeight) {
+    var wbA = i * width + j;
+    var wbB = wbA + blockWidth;
+    var wbD = wbA + blockHeight * width;
+    var wbC = wbD + blockWidth;
+    var blockEdgesDensity = (integralImageSobel[wbA] - integralImageSobel[wbB] - integralImageSobel[wbD] + integralImageSobel[wbC]) / (blockWidth * blockHeight * 255);
+    if (blockEdgesDensity < edgesDensity) {
+      return true;
+    }
+    return false;
+  };
+
+  /**
+   * Evaluates if the block size on i,j position is a valid HAAR cascade
+   * stage.
+   * @param {array} data The HAAR cascade data.
+   * @param {array} integralImage Summed area table of the image.
+   * @param {array} integralImageSquare Summed area table of the squared image.
+   * @param {array} tiltedIntegralImage Rotated summed area table of the image.
+   * @param {number} i Vertical position of the block to be evaluated.
+   * @param {number} j Horizontal position of the block to be evaluated.
+   * @param {number} width The image width.
+   * @param {number} blockWidth The block width.
+   * @param {number} blockHeight The block height.
+   * @param {number} scale The scale factor of the block size and its original
+   *     size.
+   * @return {boolean} Whether the region passes all the stage tests.
+   * @private
+   * @static
+   */
+  tracking.ViolaJones.evalStages_ = function(data, integralImage, integralImageSquare, tiltedIntegralImage, i, j, width, blockWidth, blockHeight, scale) {
+    var inverseArea = 1.0 / (blockWidth * blockHeight);
+    var wbA = i * width + j;
+    var wbB = wbA + blockWidth;
+    var wbD = wbA + blockHeight * width;
+    var wbC = wbD + blockWidth;
+    var mean = (integralImage[wbA] - integralImage[wbB] - integralImage[wbD] + integralImage[wbC]) * inverseArea;
+    var variance = (integralImageSquare[wbA] - integralImageSquare[wbB] - integralImageSquare[wbD] + integralImageSquare[wbC]) * inverseArea - mean * mean;
+
+    var standardDeviation = 1;
+    if (variance > 0) {
+      standardDeviation = Math.sqrt(variance);
+    }
+
+    var length = data.length;
+
+    for (var w = 2; w < length; ) {
+      var stageSum = 0;
+      var stageThreshold = data[w++];
+      var nodeLength = data[w++];
+
+      while (nodeLength--) {
+        var rectsSum = 0;
+        var tilted = data[w++];
+        var rectsLength = data[w++];
+
+        for (var r = 0; r < rectsLength; r++) {
+          var rectLeft = (j + data[w++] * scale + 0.5) | 0;
+          var rectTop = (i + data[w++] * scale + 0.5) | 0;
+          var rectWidth = (data[w++] * scale + 0.5) | 0;
+          var rectHeight = (data[w++] * scale + 0.5) | 0;
+          var rectWeight = data[w++];
+
+          var w1;
+          var w2;
+          var w3;
+          var w4;
+          if (tilted) {
+            // RectSum(r) = RSAT(x-h+w, y+w+h-1) + RSAT(x, y-1) - RSAT(x-h, y+h-1) - RSAT(x+w, y+w-1)
+            w1 = (rectLeft - rectHeight + rectWidth) + (rectTop + rectWidth + rectHeight - 1) * width;
+            w2 = rectLeft + (rectTop - 1) * width;
+            w3 = (rectLeft - rectHeight) + (rectTop + rectHeight - 1) * width;
+            w4 = (rectLeft + rectWidth) + (rectTop + rectWidth - 1) * width;
+            rectsSum += (tiltedIntegralImage[w1] + tiltedIntegralImage[w2] - tiltedIntegralImage[w3] - tiltedIntegralImage[w4]) * rectWeight;
+          } else {
+            // RectSum(r) = SAT(x-1, y-1) + SAT(x+w-1, y+h-1) - SAT(x-1, y+h-1) - SAT(x+w-1, y-1)
+            w1 = rectTop * width + rectLeft;
+            w2 = w1 + rectWidth;
+            w3 = w1 + rectHeight * width;
+            w4 = w3 + rectWidth;
+            rectsSum += (integralImage[w1] - integralImage[w2] - integralImage[w3] + integralImage[w4]) * rectWeight;
+            // TODO: Review the code below to analyze performance when using it instead.
+            // w1 = (rectLeft - 1) + (rectTop - 1) * width;
+            // w2 = (rectLeft + rectWidth - 1) + (rectTop + rectHeight - 1) * width;
+            // w3 = (rectLeft - 1) + (rectTop + rectHeight - 1) * width;
+            // w4 = (rectLeft + rectWidth - 1) + (rectTop - 1) * width;
+            // rectsSum += (integralImage[w1] + integralImage[w2] - integralImage[w3] - integralImage[w4]) * rectWeight;
+          }
+        }
+
+        var nodeThreshold = data[w++];
+        var nodeLeft = data[w++];
+        var nodeRight = data[w++];
+
+        if (rectsSum * inverseArea < nodeThreshold * standardDeviation) {
+          stageSum += nodeLeft;
+        } else {
+          stageSum += nodeRight;
+        }
+      }
+
+      if (stageSum < stageThreshold) {
+        return false;
+      }
+    }
+    return true;
+  };
+
+  /**
+   * Postprocesses the detected sub-windows in order to combine overlapping
+   * detections into a single detection.
+   * @param {array} rects
+   * @return {array}
+   * @private
+   * @static
+   */
+  tracking.ViolaJones.mergeRectangles_ = function(rects) {
+    var disjointSet = new tracking.DisjointSet(rects.length);
+
+    for (var i = 0; i < rects.length; i++) {
+      var r1 = rects[i];
+      for (var j = 0; j < rects.length; j++) {
+        var r2 = rects[j];
+        if (tracking.Math.intersectRect(r1.x, r1.y, r1.x + r1.width, r1.y + r1.height, r2.x, r2.y, r2.x + r2.width, r2.y + r2.height)) {
+          var x1 = Math.max(r1.x, r2.x);
+          var y1 = Math.max(r1.y, r2.y);
+          var x2 = Math.min(r1.x + r1.width, r2.x + r2.width);
+          var y2 = Math.min(r1.y + r1.height, r2.y + r2.height);
+          var overlap = (x1 - x2) * (y1 - y2);
+          var area1 = (r1.width * r1.height);
+          var area2 = (r2.width * r2.height);
+
+          if ((overlap / (area1 * (area1 / area2)) >= this.REGIONS_OVERLAP) &&
+            (overlap / (area2 * (area1 / area2)) >= this.REGIONS_OVERLAP)) {
+            disjointSet.union(i, j);
+          }
+        }
+      }
+    }
+
+    var map = {};
+    for (var k = 0; k < disjointSet.length; k++) {
+      var rep = disjointSet.find(k);
+      if (!map[rep]) {
+        map[rep] = {
+          total: 1,
+          width: rects[k].width,
+          height: rects[k].height,
+          x: rects[k].x,
+          y: rects[k].y
+        };
+        continue;
+      }
+      map[rep].total++;
+      map[rep].width += rects[k].width;
+      map[rep].height += rects[k].height;
+      map[rep].x += rects[k].x;
+      map[rep].y += rects[k].y;
+    }
+
+    var result = [];
+    Object.keys(map).forEach(function(key) {
+      var rect = map[key];
+      result.push({
+        total: rect.total,
+        width: (rect.width / rect.total + 0.5) | 0,
+        height: (rect.height / rect.total + 0.5) | 0,
+        x: (rect.x / rect.total + 0.5) | 0,
+        y: (rect.y / rect.total + 0.5) | 0
+      });
+    });
+
+    return result;
+  };
+
+}());
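+
+// Usage sketch: `detect` expects a HAAR cascade in the flattened array format
+// shipped in build/data/*.js (face.js registers
+// tracking.ViolaJones.classifiers.face). The scale/step values below mirror
+// the ObjectTracker defaults later in this file and are illustrative.
+//
+//   var rects = tracking.ViolaJones.detect(
+//     imageData.data, width, height,
+//     1.0,   // initialScale
+//     1.25,  // scaleFactor
+//     1.5,   // stepSize
+//     0.2,   // edgesDensity; 0 disables the sobel pre-filtering
+//     tracking.ViolaJones.classifiers.face
+//   );
+//   // rects: [{ total, width, height, x, y }, ...]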
+
+(function() {
+  /**
+   * BRIEF stands for "Binary Robust Independent Elementary Features". This
+   * method generates a binary string for each keypoint found by an extractor
+   * method.
+   * @static
+   * @constructor
+   */
+  tracking.Brief = {};
+
+  /**
+   * The set of binary tests is defined by the N (x,y)-location pairs
+   * uniquely chosen during the initialization. Values could vary between
+   * N = 128, 256, 512. N = 128 yields a good compromise between speed,
+   * storage efficiency, and recognition rate.
+   * @type {number}
+   */
+  tracking.Brief.N = 512;
+
+  /**
+   * Caches coordinates values of (x,y)-location pairs uniquely chosen during
+   * the initialization.
+   * @type {Object.<number, Int32Array>}
+   * @private
+   * @static
+   */
+  tracking.Brief.randomImageOffsets_ = {};
+
+  /**
+   * Caches delta values of (x,y)-location pairs uniquely chosen during
+   * the initialization.
+   * @type {Int32Array}
+   * @private
+   * @static
+   */
+  tracking.Brief.randomWindowOffsets_ = null;
+
+  /**
+   * Generates a binary string for each keypoint found by an extractor
+   * method.
+   * @param {array} pixels The grayscale pixels in a linear [p1,p2,...] array.
+   * @param {number} width The image width.
+   * @param {array} keypoints
+   * @return {Int32Array} Returns an array where each sequence of N/32 int
+   *     values represents one keypoint's descriptor binary string (N bits),
+   *     e.g. [0,0,0,0, 0,0,0,0, ...].
+   * @static
+   */
+  tracking.Brief.getDescriptors = function(pixels, width, keypoints) {
+    // Optimizing divide by 32 operation using binary shift
+    // (this.N >> 5) === this.N/32.
+    var descriptors = new Int32Array((keypoints.length >> 1) * (this.N >> 5));
+    var descriptorWord = 0;
+    var offsets = this.getRandomOffsets_(width);
+    var position = 0;
+
+    for (var i = 0; i < keypoints.length; i += 2) {
+      var w = width * keypoints[i + 1] + keypoints[i];
+
+      var offsetsPosition = 0;
+      for (var j = 0, n = this.N; j < n; j++) {
+        if (pixels[offsets[offsetsPosition++] + w] < pixels[offsets[offsetsPosition++] + w]) {
+          // The bit in the position `j % 32` of descriptorWord should be set to 1. We do
+          // this by making an OR operation with a binary number that only has the bit
+          // in that position set to 1. That binary number is obtained by shifting 1 left by
+          // `j % 32` (which is the same as `j & 31` left) positions.
+          descriptorWord |= 1 << (j & 31);
+        }
+
+        // If the next j is a multiple of 32, we will need to use a new descriptor word to hold
+        // the next results.
+        if (!((j + 1) & 31)) {
+          descriptors[position++] = descriptorWord;
+          descriptorWord = 0;
+        }
+      }
+    }
+
+    return descriptors;
+  };
+
+  /**
+   * Matches sets of features {mi} and {m′j} extracted from two images taken
+   * from similar, and often successive, viewpoints. A classical procedure
+   * runs as follows. For each point {mi} in the first image, search in a
+   * region of the second image around location {mi} for point {m′j}. The
+   * search is based on the similarity of the local image windows, also known
+   * as kernel windows, centered on the points, which strongly characterizes
+   * the points when the images are sufficiently close. Once each keypoint is
+   * described with its binary string, they need to be compared with the
+   * closest matching point. The distance metric is critical to matching
+   * performance. Using binary strings reduces the size of the descriptor and
+   * provides a data structure that is fast to operate on and whose similarity
+   * can be measured by the Hamming distance.
+   * @param {array} keypoints1
+   * @param {array} descriptors1
+   * @param {array} keypoints2
+   * @param {array} descriptors2
+   * @return {array} Returns an array of match objects, one per keypoint in
+   *     the first set, e.g. { index1, index2, keypoint1, keypoint2,
+   *     confidence }, where index2 is the index of the closest keypoint in
+   *     the second set.
+   * @static
+   */
+  tracking.Brief.match = function(keypoints1, descriptors1, keypoints2, descriptors2) {
+    var len1 = keypoints1.length >> 1;
+    var len2 = keypoints2.length >> 1;
+    var matches = new Array(len1);
+
+    for (var i = 0; i < len1; i++) {
+      var min = Infinity;
+      var minj = 0;
+      for (var j = 0; j < len2; j++) {
+        var dist = 0;
+        // Optimizing divide by 32 operation using binary shift
+        // (this.N >> 5) === this.N/32.
+        for (var k = 0, n = this.N >> 5; k < n; k++) {
+          dist += tracking.Math.hammingWeight(descriptors1[i * n + k] ^ descriptors2[j * n + k]);
+        }
+        if (dist < min) {
+          min = dist;
+          minj = j;
+        }
+      }
+      matches[i] = {
+        index1: i,
+        index2: minj,
+        keypoint1: [keypoints1[2 * i], keypoints1[2 * i + 1]],
+        keypoint2: [keypoints2[2 * minj], keypoints2[2 * minj + 1]],
+        confidence: 1 - min / this.N
+      };
+    }
+
+    return matches;
+  };
+
+  /**
+   * Removes matches outliers by testing matches on both directions.
+   * @param {array} keypoints1
+   * @param {array} descriptors1
+   * @param {array} keypoints2
+   * @param {array} descriptors2
+   * @return {array} Returns an array of match objects in the same format as
+   *     tracking.Brief.match, keeping only matches that agree in both
+   *     directions.
+   * @static
+   */
+  tracking.Brief.reciprocalMatch = function(keypoints1, descriptors1, keypoints2, descriptors2) {
+    var matches = [];
+    if (keypoints1.length === 0 || keypoints2.length === 0) {
+      return matches;
+    }
+
+    var matches1 = tracking.Brief.match(keypoints1, descriptors1, keypoints2, descriptors2);
+    var matches2 = tracking.Brief.match(keypoints2, descriptors2, keypoints1, descriptors1);
+    for (var i = 0; i < matches1.length; i++) {
+      if (matches2[matches1[i].index2].index2 === i) {
+        matches.push(matches1[i]);
+      }
+    }
+    return matches;
+  };
+
+  /**
+   * Gets the coordinates values of (x,y)-location pairs uniquely chosen
+   * during the initialization.
+   * @return {array} Array with the random offset values.
+   * @private
+   */
+  tracking.Brief.getRandomOffsets_ = function(width) {
+    if (!this.randomWindowOffsets_) {
+      var windowPosition = 0;
+      var windowOffsets = new Int32Array(4 * this.N);
+      for (var i = 0; i < this.N; i++) {
+        windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
+        windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
+        windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
+        windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
+      }
+      this.randomWindowOffsets_ = windowOffsets;
+    }
+
+    if (!this.randomImageOffsets_[width]) {
+      var imagePosition = 0;
+      var imageOffsets = new Int32Array(2 * this.N);
+      for (var j = 0; j < this.N; j++) {
+        imageOffsets[imagePosition++] = this.randomWindowOffsets_[4 * j] * width + this.randomWindowOffsets_[4 * j + 1];
+        imageOffsets[imagePosition++] = this.randomWindowOffsets_[4 * j + 2] * width + this.randomWindowOffsets_[4 * j + 3];
+      }
+      this.randomImageOffsets_[width] = imageOffsets;
+    }
+
+    return this.randomImageOffsets_[width];
+  };
+}());
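+
+// Usage sketch: descriptors are computed over grayscale pixels at keypoints
+// found by a detector such as tracking.Fast (defined next in this file); two
+// frames can then be matched in both directions to drop outliers.
+//
+//   var gray1 = tracking.Image.grayscale(pixels1, width, height, false);
+//   var gray2 = tracking.Image.grayscale(pixels2, width, height, false);
+//   var corners1 = tracking.Fast.findCorners(gray1, width, height);
+//   var corners2 = tracking.Fast.findCorners(gray2, width, height);
+//   var desc1 = tracking.Brief.getDescriptors(gray1, width, corners1);
+//   var desc2 = tracking.Brief.getDescriptors(gray2, width, corners2);
+//   var matches = tracking.Brief.reciprocalMatch(corners1, desc1, corners2, desc2);
+//   // matches[i] = { index1, index2, keypoint1, keypoint2, confidence }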
+
+(function() {
+  /**
+   * FAST stands for "Features from Accelerated Segment Test". This method
+   * performs a point segment test corner detection. The segment test
+   * criterion operates by considering a circle of sixteen pixels around the
+   * corner candidate p. The detector classifies p as a corner if there exists
+   * a set of n contiguous pixels in the circle which are all brighter than the
+   * intensity of the candidate pixel Ip plus a threshold t, or all darker
+   * than Ip − t.
+   *
+   *       15 00 01
+   *    14          02
+   * 13                03
+   * 12       []       04
+   * 11                05
+   *    10          06
+   *       09 08 07
+   *
+   * For more reference:
+   * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.60.3991&rep=rep1&type=pdf
+   * @static
+   * @constructor
+   */
+  tracking.Fast = {};
+
+  /**
+   * Holds the threshold to determine whether the tested pixel is brighter or
+   * darker than the corner candidate p.
+   * @type {number}
+   * @default 40
+   * @static
+   */
+  tracking.Fast.THRESHOLD = 40;
+
+  /**
+   * Caches coordinates values of the circle surrounding the pixel candidate p.
+   * @type {Object.<number, Int32Array>}
+   * @private
+   * @static
+   */
+  tracking.Fast.circles_ = {};
+
+  /**
+   * Finds corners coordinates on the grayscaled image.
+   * @param {array} pixels The grayscale pixels in a linear [p1,p2,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {number} opt_threshold Threshold to determine whether the tested
+   *     pixel is brighter or darker than the corner candidate p. Defaults to
+   *     tracking.Fast.THRESHOLD (40).
+   * @return {array} Array containing the coordinates of all found corners,
+   *     e.g. [x0,y0,x1,y1,...], where P(x0,y0) represents a corner coordinate.
+   * @static
+   */
+  tracking.Fast.findCorners = function(pixels, width, height, opt_threshold) {
+    var circleOffsets = this.getCircleOffsets_(width);
+    var circlePixels = new Int32Array(16);
+    var corners = [];
+
+    if (opt_threshold === undefined) {
+      opt_threshold = this.THRESHOLD;
+    }
+
+    // When looping through the image pixels, skips the first three lines from
+    // the image boundaries to constrain the surrounding circle inside the image
+    // area.
+    for (var i = 3; i < height - 3; i++) {
+      for (var j = 3; j < width - 3; j++) {
+        var w = i * width + j;
+        var p = pixels[w];
+
+        // Loops the circle offsets to read the pixel value for the sixteen
+        // surrounding pixels.
+        for (var k = 0; k < 16; k++) {
+          circlePixels[k] = pixels[w + circleOffsets[k]];
+        }
+
+        if (this.isCorner(p, circlePixels, opt_threshold)) {
+          // The pixel p is classified as a corner, as optimization increment j
+          // by the circle radius 3 to skip the neighbor pixels inside the
+          // surrounding circle. This can be removed without compromising the
+          // result.
+          corners.push(j, i);
+          j += 3;
+        }
+      }
+    }
+
+    return corners;
+  };
+
+  /**
+   * Checks if the circle pixel is brighter than the candidate pixel p by
+   * a threshold.
+   * @param {number} circlePixel The circle pixel value.
+   * @param {number} p The value of the candidate pixel p.
+   * @param {number} threshold
+   * @return {Boolean}
+   * @static
+   */
+  tracking.Fast.isBrighter = function(circlePixel, p, threshold) {
+    return circlePixel - p > threshold;
+  };
+
+  /**
+   * Checks whether the candidate pixel p is a corner, given the values of
+   * the sixteen pixels on its surrounding circle and a threshold.
+   * @param {number} p The value of the candidate pixel p.
+   * @param {array} circlePixels The values of the sixteen circle pixels.
+   * @param {number} threshold
+   * @return {Boolean}
+   * @static
+   */
+  tracking.Fast.isCorner = function(p, circlePixels, threshold) {
+    if (this.isTriviallyExcluded(circlePixels, p, threshold)) {
+      return false;
+    }
+
+    for (var x = 0; x < 16; x++) {
+      var darker = true;
+      var brighter = true;
+
+      for (var y = 0; y < 9; y++) {
+        var circlePixel = circlePixels[(x + y) & 15];
+
+        if (!this.isBrighter(p, circlePixel, threshold)) {
+          brighter = false;
+          if (darker === false) {
+            break;
+          }
+        }
+
+        if (!this.isDarker(p, circlePixel, threshold)) {
+          darker = false;
+          if (brighter === false) {
+            break;
+          }
+        }
+      }
+
+      if (brighter || darker) {
+        return true;
+      }
+    }
+
+    return false;
+  };
+
+  /**
+   * Checks if the circle pixel is darker than the candidate pixel p by
+   * a threshold.
+   * @param {number} circlePixel The circle pixel value.
+   * @param {number} p The value of the candidate pixel p.
+   * @param {number} threshold
+   * @return {Boolean}
+   * @static
+   */
+  tracking.Fast.isDarker = function(circlePixel, p, threshold) {
+    return p - circlePixel > threshold;
+  };
+
+  /**
+   * Fast check that trivially excludes pixels that cannot be corners. To be
+   * a corner, the candidate pixel must be darker or brighter than a long
+   * contiguous arc of surrounding pixels; when fewer than three of the top,
+   * bottom, left and right circle pixels are brighter (or darker) than p,
+   * the candidate can be excluded early, improving performance.
+   * @param {Int32Array} circlePixels The sixteen circle pixel values.
+   * @param {number} p The value of the candidate pixel p.
+   * @param {number} threshold
+   * @return {Boolean}
+   * @static
+   * @protected
+   */
+  tracking.Fast.isTriviallyExcluded = function(circlePixels, p, threshold) {
+    var count = 0;
+    var circleBottom = circlePixels[8];
+    var circleLeft = circlePixels[12];
+    var circleRight = circlePixels[4];
+    var circleTop = circlePixels[0];
+
+    if (this.isBrighter(circleTop, p, threshold)) {
+      count++;
+    }
+    if (this.isBrighter(circleRight, p, threshold)) {
+      count++;
+    }
+    if (this.isBrighter(circleBottom, p, threshold)) {
+      count++;
+    }
+    if (this.isBrighter(circleLeft, p, threshold)) {
+      count++;
+    }
+
+    if (count < 3) {
+      count = 0;
+      if (this.isDarker(circleTop, p, threshold)) {
+        count++;
+      }
+      if (this.isDarker(circleRight, p, threshold)) {
+        count++;
+      }
+      if (this.isDarker(circleBottom, p, threshold)) {
+        count++;
+      }
+      if (this.isDarker(circleLeft, p, threshold)) {
+        count++;
+      }
+      if (count < 3) {
+        return true;
+      }
+    }
+
+    return false;
+  };
+
+  /**
+   * Gets the sixteen offset values of the circle surrounding a pixel.
+   * @param {number} width The image width.
+   * @return {array} Array with the sixteen offset values of the circle
+   *     surrounding a pixel.
+   * @private
+   */
+  tracking.Fast.getCircleOffsets_ = function(width) {
+    if (this.circles_[width]) {
+      return this.circles_[width];
+    }
+
+    var circle = new Int32Array(16);
+
+    circle[0] = -width - width - width;
+    circle[1] = circle[0] + 1;
+    circle[2] = circle[1] + width + 1;
+    circle[3] = circle[2] + width + 1;
+    circle[4] = circle[3] + width;
+    circle[5] = circle[4] + width;
+    circle[6] = circle[5] + width - 1;
+    circle[7] = circle[6] + width - 1;
+    circle[8] = circle[7] - 1;
+    circle[9] = circle[8] - 1;
+    circle[10] = circle[9] - width - 1;
+    circle[11] = circle[10] - width - 1;
+    circle[12] = circle[11] - width;
+    circle[13] = circle[12] - width;
+    circle[14] = circle[13] - width + 1;
+    circle[15] = circle[14] - width + 1;
+
+    this.circles_[width] = circle;
+    return circle;
+  };
+}());
+
+(function() {
+  /**
+   * Math utility.
+   * @static
+   * @constructor
+   */
+  tracking.Math = {};
+
+  /**
+   * Euclidean distance between two points P(x0, y0) and P(x1, y1).
+   * @param {number} x0 Horizontal coordinate of P0.
+   * @param {number} y0 Vertical coordinate of P0.
+   * @param {number} x1 Horizontal coordinate of P1.
+   * @param {number} y1 Vertical coordinate of P1.
+   * @return {number} The euclidean distance.
+   */
+  tracking.Math.distance = function(x0, y0, x1, y1) {
+    var dx = x1 - x0;
+    var dy = y1 - y0;
+
+    return Math.sqrt(dx * dx + dy * dy);
+  };
+
+  /**
+   * Calculates the Hamming weight of a string, which is the number of symbols that are
+   * different from the zero-symbol of the alphabet used. It is thus
+   * equivalent to the Hamming distance from the all-zero string of the same
+   * length. For the most typical case, a string of bits, this is the number
+   * of 1's in the string.
+   *
+   * Example:
+   *
+   * <pre>
+   *  Binary string     Hamming weight
+   *   11101                 4
+   *   11101010              5
+   * </pre>
+   *
+   * @param {number} i The 32-bit integer whose hamming weight is calculated.
+   * @return {number} The hamming weight.
+   */
+  tracking.Math.hammingWeight = function(i) {
+    i = i - ((i >> 1) & 0x55555555);
+    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
+
+    return ((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
+  };
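+
+  // Worked example (editor's note): hammingWeight(0xEA) === 5, since 0xEA is
+  // 11101010 in binary. The three lines above are the classic SWAR popcount:
+  // sum bits in pairs, then in nibbles, then multiply by 0x1010101 so the
+  // top byte accumulates the total, which the final shift extracts.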
+
+  /**
+   * Generates a random number in the [a, b) interval.
+   * @param {number} a
+   * @param {number} b
+   * @return {number}
+   */
+  tracking.Math.uniformRandom = function(a, b) {
+    return a + Math.random() * (b - a);
+  };
+
+  /**
+   * Tests if a rectangle intersects with another.
+   *
+   *  <pre>
+   *  x0y0 --------       x2y2 --------
+   *      |       |           |       |
+   *      -------- x1y1       -------- x3y3
+   * </pre>
+   *
+   * @param {number} x0 Horizontal coordinate of P0.
+   * @param {number} y0 Vertical coordinate of P0.
+   * @param {number} x1 Horizontal coordinate of P1.
+   * @param {number} y1 Vertical coordinate of P1.
+   * @param {number} x2 Horizontal coordinate of P2.
+   * @param {number} y2 Vertical coordinate of P2.
+   * @param {number} x3 Horizontal coordinate of P3.
+   * @param {number} y3 Vertical coordinate of P3.
+   * @return {boolean}
+   */
+  tracking.Math.intersectRect = function(x0, y0, x1, y1, x2, y2, x3, y3) {
+    return !(x2 > x1 || x3 < x0 || y2 > y1 || y3 < y0);
+  };
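+
+  // Example (editor's note): intersectRect(0, 0, 10, 10, 5, 5, 15, 15)
+  // returns true, while intersectRect(0, 0, 10, 10, 11, 0, 20, 10)
+  // returns false, since the second box starts past x1 = 10.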
+
+}());
+
+(function() {
+  /**
+   * Matrix utility.
+   * @static
+   * @constructor
+   */
+  tracking.Matrix = {};
+
+  /**
+   * Loops over the array organized in row-major order and executes the `fn`
+   * callback for each iteration. The `fn` callback receives the following
+   * parameters: `(r,g,b,a,index,i,j)`, where `r,g,b,a` represents the pixel
+   * color with alpha channel, `index` represents the position in the
+   * row-major order array and `i,j` the respective index positions in two
+   * dimensions.
+   * @param {array} pixels The pixels in a linear [r,g,b,a,...] array to loop
+   *     through.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {function} fn The callback function for each pixel.
+   * @param {number} opt_jump Optional jump for the iteration, by default it
+   *     is 1, hence loops all the pixels of the array.
+   * @static
+   */
+  tracking.Matrix.forEach = function(pixels, width, height, fn, opt_jump) {
+    opt_jump = opt_jump || 1;
+    for (var i = 0; i < height; i += opt_jump) {
+      for (var j = 0; j < width; j += opt_jump) {
+        var w = i * width * 4 + j * 4;
+        fn.call(this, pixels[w], pixels[w + 1], pixels[w + 2], pixels[w + 3], w, i, j);
+      }
+    }
+  };
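+
+  /**
+   * Usage sketch (editor's illustration): sample every 10th row and column
+   * of an RGBA frame and log the red channel.
+   * <pre>
+   * tracking.Matrix.forEach(pixels, width, height, function(r, g, b, a, w, i, j) {
+   *   console.log('pixel(' + i + ',' + j + ') red =', r);
+   * }, 10);
+   * </pre>
+   */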
+
+  /**
+   * Calculates the per-element subtraction of two NxM matrices and returns a
+   * new NxM matrix as the result.
+   * @param {matrix} a The first matrix.
+   * @param {matrix} b The second matrix.
+   * @static
+   */
+  tracking.Matrix.sub = function(a, b){
+    var res = tracking.Matrix.clone(a);
+    for(var i=0; i < res.length; i++){
+      for(var j=0; j < res[i].length; j++){
+        res[i][j] -= b[i][j]; 
+      }
+    }
+    return res;
+  }
+
+  /**
+   * Calculates the per-element sum of two NxM matrices and returns a new
+   * NxM matrix as the result.
+   * @param {matrix} a The first matrix.
+   * @param {matrix} b The second matrix.
+   * @static
+   */
+  tracking.Matrix.add = function(a, b){
+    var res = tracking.Matrix.clone(a);
+    for(var i=0; i < res.length; i++){
+      for(var j=0; j < res[i].length; j++){
+        res[i][j] += b[i][j]; 
+      }
+    }
+    return res;
+  }
+
+  /**
+   * Clones a matrix (or a top-left region of it) and returns a new matrix
+   * as the result.
+   * @param {matrix} src The matrix to be cloned.
+   * @param {number} width Optional width of the region to clone; defaults
+   *     to the source row length.
+   * @param {number} height Optional height of the region to clone; defaults
+   *     to the source column length.
+   * @static
+   */
+  tracking.Matrix.clone = function(src, width, height){
+    width = width || src[0].length;
+    height = height || src.length;
+    var temp = new Array(height);
+    var i = height;
+    while(i--){
+      temp[i] = new Array(width);
+      var j = width;
+      while(j--) temp[i][j] = src[i][j];
+    } 
+    return temp;
+  }
+
+  /**
+   * Multiply a matrix by a scalar and returns a new matrix as the result.
+   * @param {number} scalar The scalar to multiply the matrix by.
+   * @param {matrix} src The matrix to be multiplied.
+   * @static
+   */
+  tracking.Matrix.mulScalar = function(scalar, src){
+    var res = tracking.Matrix.clone(src);
+    for(var i=0; i < src.length; i++){
+      for(var j=0; j < src[i].length; j++){
+        res[i][j] *= scalar;
+      }
+    }
+    return res;
+  }
+
+  /**
+   * Transpose a matrix and returns a new matrix as the result.
+   * @param {matrix} src The matrix to be transposed.
+   * @static
+   */
+  tracking.Matrix.transpose = function(src){
+    var transpose = new Array(src[0].length);
+    for(var i=0; i < src[0].length; i++){
+      transpose[i] = new Array(src.length);
+      for(var j=0; j < src.length; j++){
+        transpose[i][j] = src[j][i];
+      }
+    }
+    return transpose;
+  }
+
+  /**
+   * Multiply an MxN matrix with an NxP matrix and returns a new MxP matrix
+   * as the result.
+   * @param {matrix} a The first matrix.
+   * @param {matrix} b The second matrix.
+   * @static
+   */
+  tracking.Matrix.mul = function(a, b) {
+    var res = new Array(a.length);
+    for (var i = 0; i < a.length; i++) {
+      res[i] = new Array(b[0].length);
+      for (var j = 0; j < b[0].length; j++) {
+        res[i][j] = 0;            
+        for (var k = 0; k < a[0].length; k++) {
+          res[i][j] += a[i][k] * b[k][j];
+        }
+      }
+    }
+    return res;
+  }
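+
+  // Worked example (editor's note):
+  // tracking.Matrix.mul([[1, 2], [3, 4]], [[5, 6], [7, 8]])
+  // returns [[19, 22], [43, 50]], e.g. res[0][0] = 1*5 + 2*7 = 19.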
+
+  /**
+   * Calculates the Frobenius norm of a matrix (the square root of the sum
+   * of the squared elements).
+   * @param {matrix} src The matrix whose norm will be calculated.
+   * @static
+   */
+  tracking.Matrix.norm = function(src){
+    var res = 0;
+    for(var i=0; i < src.length; i++){
+      for(var j=0; j < src[i].length; j++){
+        res += src[i][j]*src[i][j];
+      }
+    }
+    return Math.sqrt(res);
+  }
+
+  /**
+   * Calculates and returns the covariance matrix of a set of vectors, as
+   * well as the row means of the matrix, as a [covar, mean] pair.
+   * @param {matrix} src The matrix whose covariance matrix will be calculated.
+   * @static
+   */
+  tracking.Matrix.calcCovarMatrix = function(src){
+
+    var mean = new Array(src.length);
+    for(var i=0; i < src.length; i++){
+      mean[i] = [0.0];
+      for(var j=0; j < src[i].length; j++){
+        mean[i][0] += src[i][j]/src[i].length;
+      }
+    }
+
+    var deltaFull = tracking.Matrix.clone(mean);
+    for(var i=0; i < deltaFull.length; i++){
+      for(var j=0; j < src[0].length - 1; j++){
+        deltaFull[i].push(deltaFull[i][0]);
+      }
+    }
+
+    var a = tracking.Matrix.sub(src, deltaFull);
+    var b = tracking.Matrix.transpose(a);
+    var covar = tracking.Matrix.mul(b,a); 
+    return [covar, mean];
+
+  }
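+
+  // Note (editor's): the returned "covariance" is the unnormalized scatter
+  // matrix transpose(A) * A of the mean-centered data. Callers such as
+  // similarityTransform_ below only compare the norms of two such matrices
+  // computed over equally sized shapes, so the missing 1/(n-1) factor cancels.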
+
+}());
+(function() {
+  /**
+   * EPnp utility.
+   * @static
+   * @constructor
+   */
+  tracking.EPnP = {};
+
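+  // Note (editor's): this is an empty stub; EPnP pose estimation is not
+  // implemented in this build.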
+  tracking.EPnP.solve = function(objectPoints, imagePoints, cameraMatrix) {};
+}());
+
+(function() {
+  /**
+   * Tracker utility.
+   * @constructor
+   * @extends {tracking.EventEmitter}
+   */
+  tracking.Tracker = function() {
+    tracking.Tracker.base(this, 'constructor');
+  };
+
+  tracking.inherits(tracking.Tracker, tracking.EventEmitter);
+
+  /**
+   * Tracks the pixels on the array. This method is called for each video
+   * frame in order to emit `track` event.
+   * @param {Uint8ClampedArray} pixels The pixels data to track.
+   * @param {number} width The pixels canvas width.
+   * @param {number} height The pixels canvas height.
+   */
+  tracking.Tracker.prototype.track = function() {};
+}());
+
+(function() {
+  /**
+   * TrackerTask utility.
+   * @constructor
+   * @extends {tracking.EventEmitter}
+   */
+  tracking.TrackerTask = function(tracker) {
+    tracking.TrackerTask.base(this, 'constructor');
+
+    if (!tracker) {
+      throw new Error('Tracker instance not specified.');
+    }
+
+    this.setTracker(tracker);
+  };
+
+  tracking.inherits(tracking.TrackerTask, tracking.EventEmitter);
+
+  /**
+   * Holds the tracker instance managed by this task.
+   * @type {tracking.Tracker}
+   * @private
+   */
+  tracking.TrackerTask.prototype.tracker_ = null;
+
+  /**
+   * Holds whether the tracker task is running.
+   * @type {boolean}
+   * @private
+   */
+  tracking.TrackerTask.prototype.running_ = false;
+
+  /**
+   * Gets the tracker instance managed by this task.
+   * @return {tracking.Tracker}
+   */
+  tracking.TrackerTask.prototype.getTracker = function() {
+    return this.tracker_;
+  };
+
+  /**
+   * Returns true if the tracker task is running, false otherwise.
+   * @return {boolean}
+   * @private
+   */
+  tracking.TrackerTask.prototype.inRunning = function() {
+    return this.running_;
+  };
+
+  /**
+   * Sets whether the tracker task is running.
+   * @param {boolean} running
+   * @private
+   */
+  tracking.TrackerTask.prototype.setRunning = function(running) {
+    this.running_ = running;
+  };
+
+  /**
+   * Sets the tracker instance managed by this task.
+   * @param {tracking.Tracker} tracker
+   */
+  tracking.TrackerTask.prototype.setTracker = function(tracker) {
+    this.tracker_ = tracker;
+  };
+
+  /**
+   * Emits a `run` event on the tracker task for the implementers to run any
+   * child action, e.g. `requestAnimationFrame`.
+   * @return {object} Returns itself, so calls can be chained.
+   */
+  tracking.TrackerTask.prototype.run = function() {
+    var self = this;
+
+    if (this.inRunning()) {
+      return;
+    }
+
+    this.setRunning(true);
+    this.reemitTrackEvent_ = function(event) {
+      self.emit('track', event);
+    };
+    this.tracker_.on('track', this.reemitTrackEvent_);
+    this.emit('run');
+    return this;
+  };
+
+  /**
+   * Emits a `stop` event on the tracker task for the implementers to stop any
+   * child action being done, e.g. `requestAnimationFrame`.
+   * @return {object} Returns itself, so calls can be chained.
+   */
+  tracking.TrackerTask.prototype.stop = function() {
+    if (!this.inRunning()) {
+      return;
+    }
+
+    this.setRunning(false);
+    this.emit('stop');
+    this.tracker_.removeListener('track', this.reemitTrackEvent_);
+    return this;
+  };
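+
+  /**
+   * Usage sketch (editor's illustration): `tracking.track` returns a
+   * `TrackerTask`, so a camera session can be stopped and resumed.
+   * <pre>
+   * var task = tracking.track('#video', tracker, { camera: true });
+   * task.stop(); // stops re-emitting the tracker's `track` events
+   * task.run();  // resumes
+   * </pre>
+   */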
+}());
+
+(function() {
+  /**
+   * ColorTracker utility to track colored blobs in a frame using color
+   * difference evaluation.
+   * @constructor
+   * @param {string|Array.<string>} opt_colors Optional colors to track.
+   * @extends {tracking.Tracker}
+   */
+  tracking.ColorTracker = function(opt_colors) {
+    tracking.ColorTracker.base(this, 'constructor');
+
+    if (typeof opt_colors === 'string') {
+      opt_colors = [opt_colors];
+    }
+
+    if (opt_colors) {
+      opt_colors.forEach(function(color) {
+        if (!tracking.ColorTracker.getColor(color)) {
+          throw new Error('Color not valid, try `new tracking.ColorTracker("magenta")`.');
+        }
+      });
+      this.setColors(opt_colors);
+    }
+  };
+
+  tracking.inherits(tracking.ColorTracker, tracking.Tracker);
+
+  /**
+   * Holds the known colors.
+   * @type {Object.<string, function>}
+   * @private
+   * @static
+   */
+  tracking.ColorTracker.knownColors_ = {};
+
+  /**
+   * Caches the coordinate offsets of the neighbours surrounding a pixel.
+   * @type {Object.<number, Int32Array>}
+   * @private
+   * @static
+   */
+  tracking.ColorTracker.neighbours_ = {};
+
+  /**
+   * Registers a color as known color.
+   * @param {string} name The color name.
+   * @param {function} fn The color function to test if the passed (r,g,b) is
+   *     the desired color.
+   * @static
+   */
+  tracking.ColorTracker.registerColor = function(name, fn) {
+    tracking.ColorTracker.knownColors_[name] = fn;
+  };
+
+  /**
+   * Gets the known color function that is able to test whether an (r,g,b) is
+   * the desired color.
+   * @param {string} name The color name.
+   * @return {function} The known color test function.
+   * @static
+   */
+  tracking.ColorTracker.getColor = function(name) {
+    return tracking.ColorTracker.knownColors_[name];
+  };
+
+  /**
+   * Holds the colors to be tracked by the `ColorTracker` instance.
+   * @default ['magenta']
+   * @type {Array.<string>}
+   */
+  tracking.ColorTracker.prototype.colors = ['magenta'];
+
+  /**
+   * Holds the minimum dimension to classify a rectangle.
+   * @default 20
+   * @type {number}
+   */
+  tracking.ColorTracker.prototype.minDimension = 20;
+
+  /**
+   * Holds the maximum dimension to classify a rectangle.
+   * @default Infinity
+   * @type {number}
+   */
+  tracking.ColorTracker.prototype.maxDimension = Infinity;
+
+
+  /**
+   * Holds the minimum group size to be classified as a rectangle.
+   * @default 30
+   * @type {number}
+   */
+  tracking.ColorTracker.prototype.minGroupSize = 30;
+
+  /**
+   * Calculates the bounding dimensions from the cloud points. The cloud
+   * points are all points that match the desired color.
+   * @param {Array.<number>} cloud Row-major order array containing all the
+   *     points from the desired color, e.g. [x1, y1, x2, y2, ...].
+   * @param {number} total Total number of pixels of the desired color.
+   * @return {object} Object containing the x, y, width and height of the
+   *     blob extracted from the cloud points.
+   * @private
+   */
+  tracking.ColorTracker.prototype.calculateDimensions_ = function(cloud, total) {
+    var maxx = -1;
+    var maxy = -1;
+    var minx = Infinity;
+    var miny = Infinity;
+
+    for (var c = 0; c < total; c += 2) {
+      var x = cloud[c];
+      var y = cloud[c + 1];
+
+      if (x < minx) {
+        minx = x;
+      }
+      if (x > maxx) {
+        maxx = x;
+      }
+      if (y < miny) {
+        miny = y;
+      }
+      if (y > maxy) {
+        maxy = y;
+      }
+    }
+
+    return {
+      width: maxx - minx,
+      height: maxy - miny,
+      x: minx,
+      y: miny
+    };
+  };
+
+  /**
+   * Gets the colors being tracked by the `ColorTracker` instance.
+   * @return {Array.<string>}
+   */
+  tracking.ColorTracker.prototype.getColors = function() {
+    return this.colors;
+  };
+
+  /**
+   * Gets the minimum dimension to classify a rectangle.
+   * @return {number}
+   */
+  tracking.ColorTracker.prototype.getMinDimension = function() {
+    return this.minDimension;
+  };
+
+  /**
+   * Gets the maximum dimension to classify a rectangle.
+   * @return {number}
+   */
+  tracking.ColorTracker.prototype.getMaxDimension = function() {
+    return this.maxDimension;
+  };
+
+  /**
+   * Gets the minimum group size to be classified as a rectangle.
+   * @return {number}
+   */
+  tracking.ColorTracker.prototype.getMinGroupSize = function() {
+    return this.minGroupSize;
+  };
+
+  /**
+   * Gets the eight offset values of the neighbours surrounding a pixel.
+   * @param {number} width The image width.
+   * @return {array} Array with the eight offset values of the neighbours
+   *     surrounding a pixel.
+   * @private
+   */
+  tracking.ColorTracker.prototype.getNeighboursForWidth_ = function(width) {
+    if (tracking.ColorTracker.neighbours_[width]) {
+      return tracking.ColorTracker.neighbours_[width];
+    }
+
+    var neighbours = new Int32Array(8);
+
+    neighbours[0] = -width * 4;
+    neighbours[1] = -width * 4 + 4;
+    neighbours[2] = 4;
+    neighbours[3] = width * 4 + 4;
+    neighbours[4] = width * 4;
+    neighbours[5] = width * 4 - 4;
+    neighbours[6] = -4;
+    neighbours[7] = -width * 4 - 4;
+
+    tracking.ColorTracker.neighbours_[width] = neighbours;
+
+    return neighbours;
+  };
+
+  /**
+   * Unites groups whose bounding box intersect with each other.
+   * @param {Array.<Object>} rects
+   * @private
+   */
+  tracking.ColorTracker.prototype.mergeRectangles_ = function(rects) {
+    var intersects;
+    var results = [];
+    var minDimension = this.getMinDimension();
+    var maxDimension = this.getMaxDimension();
+
+    for (var r = 0; r < rects.length; r++) {
+      var r1 = rects[r];
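+      // Note (editor's): despite its name, `intersects` here means "still
+      // isolated"; it is flipped to false once r1 is merged into a later
+      // rectangle, so only unmerged rectangles are size-checked below.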
+      intersects = true;
+      for (var s = r + 1; s < rects.length; s++) {
+        var r2 = rects[s];
+        if (tracking.Math.intersectRect(r1.x, r1.y, r1.x + r1.width, r1.y + r1.height, r2.x, r2.y, r2.x + r2.width, r2.y + r2.height)) {
+          intersects = false;
+          var x1 = Math.min(r1.x, r2.x);
+          var y1 = Math.min(r1.y, r2.y);
+          var x2 = Math.max(r1.x + r1.width, r2.x + r2.width);
+          var y2 = Math.max(r1.y + r1.height, r2.y + r2.height);
+          r2.height = y2 - y1;
+          r2.width = x2 - x1;
+          r2.x = x1;
+          r2.y = y1;
+          break;
+        }
+      }
+
+      if (intersects) {
+        if (r1.width >= minDimension && r1.height >= minDimension) {
+          if (r1.width <= maxDimension && r1.height <= maxDimension) {
+            results.push(r1);
+          }
+        }
+      }
+    }
+
+    return results;
+  };
+
+  /**
+   * Sets the colors to be tracked by the `ColorTracker` instance.
+   * @param {Array.<string>} colors
+   */
+  tracking.ColorTracker.prototype.setColors = function(colors) {
+    this.colors = colors;
+  };
+
+  /**
+   * Sets the minimum dimension to classify a rectangle.
+   * @param {number} minDimension
+   */
+  tracking.ColorTracker.prototype.setMinDimension = function(minDimension) {
+    this.minDimension = minDimension;
+  };
+
+  /**
+   * Sets the maximum dimension to classify a rectangle.
+   * @param {number} maxDimension
+   */
+  tracking.ColorTracker.prototype.setMaxDimension = function(maxDimension) {
+    this.maxDimension = maxDimension;
+  };
+
+  /**
+   * Sets the minimum group size to be classified as a rectangle.
+   * @param {number} minGroupSize
+   */
+  tracking.ColorTracker.prototype.setMinGroupSize = function(minGroupSize) {
+    this.minGroupSize = minGroupSize;
+  };
+
+  /**
+   * Tracks the `Video` frames. This method is called for each video frame in
+   * order to emit `track` event.
+   * @param {Uint8ClampedArray} pixels The pixels data to track.
+   * @param {number} width The pixels canvas width.
+   * @param {number} height The pixels canvas height.
+   */
+  tracking.ColorTracker.prototype.track = function(pixels, width, height) {
+    var self = this;
+    var colors = this.getColors();
+
+    if (!colors) {
+      throw new Error('Colors not specified, try `new tracking.ColorTracker("magenta")`.');
+    }
+
+    var results = [];
+
+    colors.forEach(function(color) {
+      results = results.concat(self.trackColor_(pixels, width, height, color));
+    });
+
+    this.emit('track', {
+      data: results
+    });
+  };
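+
+  /**
+   * Usage sketch (editor's illustration):
+   * <pre>
+   * var colors = new tracking.ColorTracker(['magenta', 'cyan', 'yellow']);
+   * colors.on('track', function(event) {
+   *   event.data.forEach(function(rect) {
+   *     console.log(rect.color, rect.x, rect.y, rect.width, rect.height);
+   *   });
+   * });
+   * tracking.track('#video', colors, { camera: true });
+   * </pre>
+   */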
+
+  /**
+   * Finds the given color in the given matrix of pixels using a flood fill
+   * algorithm that determines the area connected to a given node in a
+   * multi-dimensional array.
+   * @param {Uint8ClampedArray} pixels The pixels data to track.
+   * @param {number} width The pixels canvas width.
+   * @param {number} height The pixels canvas height.
+   * @param {string} color The color to be found
+   * @private
+   */
+  tracking.ColorTracker.prototype.trackColor_ = function(pixels, width, height, color) {
+    var colorFn = tracking.ColorTracker.knownColors_[color];
+    var currGroup = new Int32Array(pixels.length >> 2);
+    var currGroupSize;
+    var currI;
+    var currJ;
+    var currW;
+    var marked = new Int8Array(pixels.length);
+    var minGroupSize = this.getMinGroupSize();
+    var neighboursW = this.getNeighboursForWidth_(width);
+    var queue = new Int32Array(pixels.length);
+    var queuePosition;
+    var results = [];
+    var w = -4;
+
+    if (!colorFn) {
+      return results;
+    }
+
+    for (var i = 0; i < height; i++) {
+      for (var j = 0; j < width; j++) {
+        w += 4;
+
+        if (marked[w]) {
+          continue;
+        }
+
+        currGroupSize = 0;
+
+        queuePosition = -1;
+        queue[++queuePosition] = w;
+        queue[++queuePosition] = i;
+        queue[++queuePosition] = j;
+
+        marked[w] = 1;
+
+        while (queuePosition >= 0) {
+          currJ = queue[queuePosition--];
+          currI = queue[queuePosition--];
+          currW = queue[queuePosition--];
+
+          if (colorFn(pixels[currW], pixels[currW + 1], pixels[currW + 2], pixels[currW + 3], currW, currI, currJ)) {
+            currGroup[currGroupSize++] = currJ;
+            currGroup[currGroupSize++] = currI;
+
+            for (var k = 0; k < neighboursW.length; k++) {
+              var otherW = currW + neighboursW[k];
+              var otherI = currI + neighboursI[k];
+              var otherJ = currJ + neighboursJ[k];
+              if (!marked[otherW] && otherI >= 0 && otherI < height && otherJ >= 0 && otherJ < width) {
+                queue[++queuePosition] = otherW;
+                queue[++queuePosition] = otherI;
+                queue[++queuePosition] = otherJ;
+
+                marked[otherW] = 1;
+              }
+            }
+          }
+        }
+
+        if (currGroupSize >= minGroupSize) {
+          var data = this.calculateDimensions_(currGroup, currGroupSize);
+          if (data) {
+            data.color = color;
+            results.push(data);
+          }
+        }
+      }
+    }
+
+    return this.mergeRectangles_(results);
+  };
+
+  // Default colors
+  //===================
+
+  tracking.ColorTracker.registerColor('cyan', function(r, g, b) {
+    var thresholdGreen = 50,
+      thresholdBlue = 70,
+      dx = r - 0,
+      dy = g - 255,
+      dz = b - 255;
+
+    if ((g - r) >= thresholdGreen && (b - r) >= thresholdBlue) {
+      return true;
+    }
+    return dx * dx + dy * dy + dz * dz < 6400;
+  });
+
+  tracking.ColorTracker.registerColor('magenta', function(r, g, b) {
+    var threshold = 50,
+      dx = r - 255,
+      dy = g - 0,
+      dz = b - 255;
+
+    if ((r - g) >= threshold && (b - g) >= threshold) {
+      return true;
+    }
+    return dx * dx + dy * dy + dz * dz < 19600;
+  });
+
+  tracking.ColorTracker.registerColor('yellow', function(r, g, b) {
+    var threshold = 50,
+      dx = r - 255,
+      dy = g - 255,
+      dz = b - 0;
+
+    if ((r - b) >= threshold && (g - b) >= threshold) {
+      return true;
+    }
+    return dx * dx + dy * dy + dz * dz < 10000;
+  });
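+
+  // Editor's illustration: custom colors can be registered the same way.
+  // The thresholds below are assumptions for a rough "red" matcher, not
+  // part of the library:
+  //
+  //   tracking.ColorTracker.registerColor('red', function(r, g, b) {
+  //     return r > 150 && (r - g) > 70 && (r - b) > 70;
+  //   });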
+
+
+  // Caching neighbour i/j offset values.
+  //=====================================
+  var neighboursI = new Int32Array([-1, -1, 0, 1, 1, 1, 0, -1]);
+  var neighboursJ = new Int32Array([0, 1, 1, 1, 0, -1, -1, -1]);
+}());
+
+(function() {
+  /**
+   * ObjectTracker utility.
+   * @constructor
+   * @param {string|Array.<string|Array.<number>>} opt_classifiers Optional
+   *     object classifiers to track.
+   * @extends {tracking.Tracker}
+   */
+  tracking.ObjectTracker = function(opt_classifiers) {
+    tracking.ObjectTracker.base(this, 'constructor');
+
+    if (opt_classifiers) {
+      if (!Array.isArray(opt_classifiers)) {
+        opt_classifiers = [opt_classifiers];
+      }
+
+      if (Array.isArray(opt_classifiers)) {
+        opt_classifiers.forEach(function(classifier, i) {
+          if (typeof classifier === 'string') {
+            opt_classifiers[i] = tracking.ViolaJones.classifiers[classifier];
+          }
+          if (!opt_classifiers[i]) {
+            throw new Error('Object classifier not valid, try `new tracking.ObjectTracker("face")`.');
+          }
+        });
+      }
+    }
+
+    this.setClassifiers(opt_classifiers);
+  };
+
+  tracking.inherits(tracking.ObjectTracker, tracking.Tracker);
+
+  /**
+   * Specifies the edges density of a block in order to decide whether to skip
+   * it or not.
+   * @default 0.2
+   * @type {number}
+   */
+  tracking.ObjectTracker.prototype.edgesDensity = 0.2;
+
+  /**
+   * Specifies the initial scale to start the feature block scaling.
+   * @default 1.0
+   * @type {number}
+   */
+  tracking.ObjectTracker.prototype.initialScale = 1.0;
+
+  /**
+   * Specifies the scale factor to scale the feature block.
+   * @default 1.25
+   * @type {number}
+   */
+  tracking.ObjectTracker.prototype.scaleFactor = 1.25;
+
+  /**
+   * Specifies the block step size.
+   * @default 1.5
+   * @type {number}
+   */
+  tracking.ObjectTracker.prototype.stepSize = 1.5;
+
+  /**
+   * Gets the tracker HAAR classifiers.
+   * @return {TypedArray.<number>}
+   */
+  tracking.ObjectTracker.prototype.getClassifiers = function() {
+    return this.classifiers;
+  };
+
+  /**
+   * Gets the edges density value.
+   * @return {number}
+   */
+  tracking.ObjectTracker.prototype.getEdgesDensity = function() {
+    return this.edgesDensity;
+  };
+
+  /**
+   * Gets the initial scale to start the feature block scaling.
+   * @return {number}
+   */
+  tracking.ObjectTracker.prototype.getInitialScale = function() {
+    return this.initialScale;
+  };
+
+  /**
+   * Gets the scale factor to scale the feature block.
+   * @return {number}
+   */
+  tracking.ObjectTracker.prototype.getScaleFactor = function() {
+    return this.scaleFactor;
+  };
+
+  /**
+   * Gets the block step size.
+   * @return {number}
+   */
+  tracking.ObjectTracker.prototype.getStepSize = function() {
+    return this.stepSize;
+  };
+
+  /**
+   * Tracks the `Video` frames. This method is called for each video frame in
+   * order to emit `track` event.
+   * @param {Uint8ClampedArray} pixels The pixels data to track.
+   * @param {number} width The pixels canvas width.
+   * @param {number} height The pixels canvas height.
+   */
+  tracking.ObjectTracker.prototype.track = function(pixels, width, height) {
+    var self = this;
+    var classifiers = this.getClassifiers();
+
+    if (!classifiers) {
+      throw new Error('Object classifier not specified, try `new tracking.ObjectTracker("face")`.');
+    }
+
+    var results = [];
+
+    classifiers.forEach(function(classifier) {
+      results = results.concat(tracking.ViolaJones.detect(pixels, width, height, self.getInitialScale(), self.getScaleFactor(), self.getStepSize(), self.getEdgesDensity(), classifier));
+    });
+
+    this.emit('track', {
+      data: results
+    });
+  };
+
+  /**
+   * Sets the tracker HAAR classifiers.
+   * @param {TypedArray.<number>} classifiers
+   */
+  tracking.ObjectTracker.prototype.setClassifiers = function(classifiers) {
+    this.classifiers = classifiers;
+  };
+
+  /**
+   * Sets the edges density.
+   * @param {number} edgesDensity
+   */
+  tracking.ObjectTracker.prototype.setEdgesDensity = function(edgesDensity) {
+    this.edgesDensity = edgesDensity;
+  };
+
+  /**
+   * Sets the initial scale to start the block scaling.
+   * @param {number} initialScale
+   */
+  tracking.ObjectTracker.prototype.setInitialScale = function(initialScale) {
+    this.initialScale = initialScale;
+  };
+
+  /**
+   * Sets the scale factor to scale the feature block.
+   * @param {number} scaleFactor
+   */
+  tracking.ObjectTracker.prototype.setScaleFactor = function(scaleFactor) {
+    this.scaleFactor = scaleFactor;
+  };
+
+  /**
+   * Sets the block step size.
+   * @param {number} stepSize
+   */
+  tracking.ObjectTracker.prototype.setStepSize = function(stepSize) {
+    this.stepSize = stepSize;
+  };
+
+}());
+
+(function() {
+
+
+  tracking.LandmarksTracker = function() {
+    tracking.LandmarksTracker.base(this, 'constructor');
+  }
+
+  tracking.inherits(tracking.LandmarksTracker, tracking.ObjectTracker);
+
+  tracking.LandmarksTracker.prototype.track = function(pixels, width, height) {
+
+    var image = {
+      'data': pixels,
+      'width': width,
+      'height': height
+    };
+
+    var classifier = tracking.ViolaJones.classifiers['face'];
+
+    var faces = tracking.ViolaJones.detect(pixels, width, height, 
+      this.getInitialScale(), this.getScaleFactor(), this.getStepSize(), 
+      this.getEdgesDensity(), classifier);
+
+    var landmarks = tracking.LBF.align(pixels, width, height, faces);
+
+    this.emit('track', {
+      'data': {
+        'faces' : faces,
+        'landmarks' : landmarks
+      }
+    });
+
+  }
+
+}());
+
+(function() {
+
+  tracking.LBF = {};
+
+  /**
+   * LBF Regressor utility.
+   * @constructor
+   */
+  tracking.LBF.Regressor = function(maxNumStages){
+    this.maxNumStages = maxNumStages;
+
+    this.rfs = new Array(maxNumStages);
+    this.models = new Array(maxNumStages);
+    for(var i=0; i < maxNumStages; i++){
+      this.rfs[i] = new tracking.LBF.RandomForest(i);
+      this.models[i] = tracking.LBF.RegressorData[i].models;
+    }
+
+    this.meanShape = tracking.LBF.LandmarksData;
+  }
+
+  /**
+   * Predicts the position of the landmarks based on the bounding box of the face.
+   * @param {pixels} pixels The grayscale pixels in a linear array.
+   * @param {number} width Width of the image.
+   * @param {number} height Height of the image.
+   * @param {object} boundingBox Bounding box of the face to be aligned.
+   * @return {matrix} A matrix with each landmark position in a row [x,y].
+   */
+  tracking.LBF.Regressor.prototype.predict = function(pixels, width, height, boundingBox) {
+
+    var images = [];
+    var currentShapes = [];
+    var boundingBoxes = [];
+
+    var meanShapeClone = tracking.Matrix.clone(this.meanShape);
+
+    images.push({
+      'data': pixels,
+      'width': width,
+      'height': height
+    });
+    boundingBoxes.push(boundingBox);
+
+    currentShapes.push(tracking.LBF.projectShapeToBoundingBox_(meanShapeClone, boundingBox));
+
+    for(var stage = 0; stage < this.maxNumStages; stage++){
+      var binaryFeatures = tracking.LBF.Regressor.deriveBinaryFeat(this.rfs[stage], images, currentShapes, boundingBoxes, meanShapeClone);
+      this.applyGlobalPrediction(binaryFeatures, this.models[stage], currentShapes, boundingBoxes);
+    }
+
+    return currentShapes[0];
+  };
+
+  /**
+   * Multiplies the binary features of the landmarks with the regression matrix
+   * to obtain the displacement for each landmark. Then applies this displacement
+   * into the landmarks shape.
+   * @param {object} binaryFeatures The binary features for the landmarks.
+   * @param {object} models The regressor models.
+   * @param {matrix} currentShapes The landmarks shapes.
+   * @param {array} boundingBoxes The bounding boxes of the faces.
+   */
+  tracking.LBF.Regressor.prototype.applyGlobalPrediction = function(binaryFeatures, models, currentShapes, 
+    boundingBoxes){
+
+    var residual = currentShapes[0].length * 2;
+
+    var deltashape = new Array(residual/2);
+    for(var i=0; i < residual/2; i++){
+      deltashape[i] = [0.0, 0.0];
+    }
+
+    for(var i=0; i < currentShapes.length; i++){
+      for(var j=0; j < residual; j++){
+        var tmp = 0;
+        for(var lx=0, idx=0; (idx = binaryFeatures[i][lx].index) != -1; lx++){
+          if(idx <= models[j].nr_feature){
+            tmp += models[j].data[(idx - 1)] * binaryFeatures[i][lx].value;
+          }
+        }
+        if(j < residual/2){
+          deltashape[j][0] = tmp;
+        }else{
+          deltashape[j - residual/2][1] = tmp;
+        }
+      }
+
+      var res = tracking.LBF.similarityTransform_(tracking.LBF.unprojectShapeToBoundingBox_(currentShapes[i], boundingBoxes[i]), this.meanShape);
+      var rotation = tracking.Matrix.transpose(res[0]);
+
+      var s = tracking.LBF.unprojectShapeToBoundingBox_(currentShapes[i], boundingBoxes[i]);
+      s = tracking.Matrix.add(s, deltashape);
+
+      currentShapes[i] = tracking.LBF.projectShapeToBoundingBox_(s, boundingBoxes[i]);
+
+    }
+  };
+
+  /**
+   * Derives the binary features from the image for each landmark. 
+   * @param {object} forest The random forest to search for the best binary feature match.
+   * @param {array} images The images with pixels in a grayscale linear array.
+   * @param {array} currentShapes The current landmarks shape.
+   * @param {array} boundingBoxes The bounding boxes of the faces.
+   * @param {matrix} meanShape The mean shape of the current landmarks set.
+   * @return {array} The binary features extracted from the image and matched with the
+   *     training data.
+   * @static
+   */
+  tracking.LBF.Regressor.deriveBinaryFeat = function(forest, images, currentShapes, boundingBoxes, meanShape){
+
+    var binaryFeatures = new Array(images.length);
+    for(var i=0; i < images.length; i++){
+      var t = forest.maxNumTrees * forest.landmarkNum + 1;
+      binaryFeatures[i] = new Array(t);
+      for(var j=0; j < t; j++){
+        binaryFeatures[i][j] = {};
+      }
+    }
+
+    var leafnodesPerTree = 1 << (forest.maxDepth - 1);
+
+    for(var i=0; i < images.length; i++){
+
+      var projectedShape = tracking.LBF.unprojectShapeToBoundingBox_(currentShapes[i], boundingBoxes[i]);
+      var transform = tracking.LBF.similarityTransform_(projectedShape, meanShape);
+      
+      for(var j=0; j < forest.landmarkNum; j++){
+        for(var k=0; k < forest.maxNumTrees; k++){
+
+          var binaryCode = tracking.LBF.Regressor.getCodeFromTree(forest.rfs[j][k], images[i], 
+                              currentShapes[i], boundingBoxes[i], transform[0], transform[1]);
+
+          var index = j*forest.maxNumTrees + k;
+          binaryFeatures[i][index].index = leafnodesPerTree * index + binaryCode;
+          binaryFeatures[i][index].value = 1;
+
+        }
+      }
+      binaryFeatures[i][forest.landmarkNum * forest.maxNumTrees].index = -1;
+      binaryFeatures[i][forest.landmarkNum * forest.maxNumTrees].value = -1;
+    }
+    return binaryFeatures;
+
+  }
+
+  /**
+   * Gets the binary code for a specific tree in a random forest. For each
+   * landmark, the positions of two pre-defined points are recovered from the
+   * training data, and the intensity difference of the pixels corresponding
+   * to these points is extracted from the image and used to traverse the
+   * tree. At the end, the leaf node that is reached is represented by 1, and
+   * the remaining leaf nodes by 0.
+   * 
+   * +--------------------------- Random Forest -----------------------------+ 
+   * | Ø = Ending leaf                                                       |
+   * |                                                                       |
+   * |       O             O             O             O             O       |
+   * |     /   \         /   \         /   \         /   \         /   \     |
+   * |    O     O       O     O       O     O       O     O       O     O    |
+   * |   / \   / \     / \   / \     / \   / \     / \   / \     / \   / \   |
+   * |  Ø   O O   O   O   O Ø   O   O   Ø O   O   O   O Ø   O   O   O O   Ø  |
+   * |  1   0 0   0   0   0 1   0   0   1 0   0   0   0 1   0   0   0 0   1  |
+   * +-----------------------------------------------------------------------+
+   * Final binary code for this landmark: 10000010010000100001
+   *
+   * @param {object} tree The tree to be analyzed.
+   * @param {array} image The image with pixels in a grayscale linear array.
+   * @param {matrix} shape The current landmarks shape.
+   * @param {object} boundingBox The bounding box of the face.
+   * @param {matrix} rotation The rotation matrix used to transform the projected landmarks
+   *     into the mean shape.
+   * @param {number} scale The scale factor used to transform the projected landmarks
+   *     into the mean shape.
+   * @return {number} The binary code extracted from the tree.
+   * @static
+   */
+  tracking.LBF.Regressor.getCodeFromTree = function(tree, image, shape, boundingBox, rotation, scale){
+    var current = 0;
+    var bincode = 0;
+
+    while(true){
+      
+      var x1 = Math.cos(tree.nodes[current].feats[0]) * tree.nodes[current].feats[2] * tree.maxRadioRadius * boundingBox.width;
+      var y1 = Math.sin(tree.nodes[current].feats[0]) * tree.nodes[current].feats[2] * tree.maxRadioRadius * boundingBox.height;
+      var x2 = Math.cos(tree.nodes[current].feats[1]) * tree.nodes[current].feats[3] * tree.maxRadioRadius * boundingBox.width;
+      var y2 = Math.sin(tree.nodes[current].feats[1]) * tree.nodes[current].feats[3] * tree.maxRadioRadius * boundingBox.height;
+
+      var project_x1 = rotation[0][0] * x1 + rotation[0][1] * y1;
+      var project_y1 = rotation[1][0] * x1 + rotation[1][1] * y1;
+
+      var real_x1 = Math.floor(project_x1 + shape[tree.landmarkID][0]);
+      var real_y1 = Math.floor(project_y1 + shape[tree.landmarkID][1]);
+      // Clamp to image bounds: x within [0, width), y within [0, height).
+      real_x1 = Math.max(0.0, Math.min(real_x1, image.width - 1.0));
+      real_y1 = Math.max(0.0, Math.min(real_y1, image.height - 1.0));
+
+      var project_x2 = rotation[0][0] * x2 + rotation[0][1] * y2;
+      var project_y2 = rotation[1][0] * x2 + rotation[1][1] * y2;
+
+      var real_x2 = Math.floor(project_x2 + shape[tree.landmarkID][0]);
+      var real_y2 = Math.floor(project_y2 + shape[tree.landmarkID][1]);
+      real_x2 = Math.max(0.0, Math.min(real_x2, image.width - 1.0));
+      real_y2 = Math.max(0.0, Math.min(real_y2, image.height - 1.0));
+      var pdf = Math.floor(image.data[real_y1*image.width + real_x1]) - 
+          Math.floor(image.data[real_y2 * image.width +real_x2]);
+
+      if(pdf < tree.nodes[current].thresh){
+        current = tree.nodes[current].cnodes[0];
+      }else{
+        current = tree.nodes[current].cnodes[1];
+      }
+
+      if (tree.nodes[current].is_leafnode == 1) {
+        bincode = 1;
+        for (var i=0; i < tree.leafnodes.length; i++) {
+          if (tree.leafnodes[i] == current) {
+            return bincode;
+          }
+          bincode++;
+        }
+        return bincode;
+      }
+
+    }
+
+    return bincode;
+  }
+
+}());
+(function() {
+  /**
+   * Face Alignment via Regressing Local Binary Features (LBF)
+   * This approach has two components: a set of local binary features and
+   * a locality principle for learning those features.
+   * The locality principle is used to guide the learning of a set of highly
+   * discriminative local binary features for each landmark independently.
+   * The obtained local binary features are used to learn a linear regression
+   * that later will be used to guide the landmarks in the alignment phase.
+   * 
+   * @authors: VoxarLabs Team (http://cin.ufpe.br/~voxarlabs)
+   *           Lucas Figueiredo <lsf@cin.ufpe.br>, Thiago Menezes <tmc2@cin.ufpe.br>,
+   *           Thiago Domingues <tald@cin.ufpe.br>, Rafael Roberto <rar3@cin.ufpe.br>,
+   *           Thulio Araujo <tlsa@cin.ufpe.br>, Joao Victor <jvfl@cin.ufpe.br>,
+   *           Tomer Simis <tls@cin.ufpe.br>
+   */
+  
+  /**
+   * Holds the maximum number of stages that will be used in the alignment algorithm.
+   * Each stage contains a different set of random forests and retrieves the binary
+   * code from a more "specialized" (i.e. smaller) region around the landmarks.
+   * @type {number}
+   * @static
+   */
+  tracking.LBF.maxNumStages = 4;
+
+  /**
+   * Holds the regressor that will be responsible for extracting the local features from 
+   * the image and guide the landmarks using the training data.
+   * @type {object}
+   * @protected
+   * @static
+   */
+  tracking.LBF.regressor_ = null; 
+  
+  /**
+   * Generates a set of landmarks for a set of faces
+   * @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
+   * @param {number} width The image width.
+   * @param {number} height The image height.
+   * @param {array} faces The list of faces detected in the image
+   * @return {array} The aligned landmarks, each set of landmarks corresponding
+   *     to a specific face.
+   * @static
+   */
+  tracking.LBF.align = function(pixels, width, height, faces){
+
+    if(tracking.LBF.regressor_ == null){
+      tracking.LBF.regressor_ = new tracking.LBF.Regressor(
+        tracking.LBF.maxNumStages
+      );
+    }
+
+    pixels = tracking.Image.grayscale(pixels, width, height, false);
+
+    pixels = tracking.Image.equalizeHist(pixels, width, height);
+
+    var shapes = new Array(faces.length);
+
+    for(var i in faces){
+
+      faces[i].height = faces[i].width;
+
+      var boundingBox = {};
+      boundingBox.startX = faces[i].x;
+      boundingBox.startY = faces[i].y;
+      boundingBox.width = faces[i].width;
+      boundingBox.height = faces[i].height;
+
+      shapes[i] = tracking.LBF.regressor_.predict(pixels, width, height, boundingBox);
+    }
+
+    return shapes;
+  }
+
+  /**
+   * Unprojects the landmarks shape from the bounding box.
+   * @param {matrix} shape The landmarks shape.
+   * @param {matrix} boundingBox The bounding box.
+   * @return {matrix} The landmarks shape normalized to the bounding box.
+   * @static
+   * @protected
+   */
+  tracking.LBF.unprojectShapeToBoundingBox_ = function(shape, boundingBox){
+    var temp = new Array(shape.length);
+    for(var i=0; i < shape.length; i++){
+      temp[i] = [
+        (shape[i][0] - boundingBox.startX) / boundingBox.width,
+        (shape[i][1] - boundingBox.startY) / boundingBox.height
+      ];
+    }
+    return temp;
+  }
+
+  /**
+   * Projects the landmarks shape into the bounding box. The landmarks shape has
+   * normalized coordinates, so it is necessary to map these coordinates into
+   * the bounding box coordinates.
+   * @param {matrix} shape The landmarks shape.
+   * @param {matrix} boundingBox The bounding box.
+   * @return {matrix} The landmarks shape.
+   * @static
+   * @protected
+   */
+  tracking.LBF.projectShapeToBoundingBox_ = function(shape, boundingBox){
+    var temp = new Array(shape.length);
+    for(var i=0; i < shape.length; i++){
+      temp[i] = [
+        shape[i][0] * boundingBox.width + boundingBox.startX,
+        shape[i][1] * boundingBox.height + boundingBox.startY
+      ];
+    }
+    return temp;
+  }
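+
+  // Worked example (editor's note): with a bounding box of
+  // {startX: 100, startY: 50, width: 200, height: 200}, the normalized
+  // landmark [0.5, 0.5] projects to [0.5 * 200 + 100, 0.5 * 200 + 50],
+  // i.e. [200, 150]; unprojectShapeToBoundingBox_ inverts this mapping.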
+
+  /**
+   * Calculates the rotation and scale necessary to transform shape1 into shape2.
+   * @param {matrix} shape1 The shape to be transformed.
+   * @param {matrix} shape2 The shape to be transformed into.
+   * @return {[matrix, scalar]} The rotation matrix and scale that applied to shape1
+   *     results in shape2.
+   * @static
+   * @protected
+   */
+  tracking.LBF.similarityTransform_ = function(shape1, shape2){
+
+    var center1 = [0,0];
+    var center2 = [0,0];
+    for (var i = 0; i < shape1.length; i++) {
+      center1[0] += shape1[i][0];
+      center1[1] += shape1[i][1];
+      center2[0] += shape2[i][0];
+      center2[1] += shape2[i][1];
+    }
+    center1[0] /= shape1.length;
+    center1[1] /= shape1.length;
+    center2[0] /= shape2.length;
+    center2[1] /= shape2.length;
+
+    var temp1 = tracking.Matrix.clone(shape1);
+    var temp2 = tracking.Matrix.clone(shape2);
+    for(var i=0; i < shape1.length; i++){
+      temp1[i][0] -= center1[0];
+      temp1[i][1] -= center1[1];
+      temp2[i][0] -= center2[0];
+      temp2[i][1] -= center2[1];
+    }
+
+    var covariance1, covariance2;
+    var mean1, mean2;
+
+    var t = tracking.Matrix.calcCovarMatrix(temp1);
+    covariance1 = t[0];
+    mean1 = t[1];
+
+    t = tracking.Matrix.calcCovarMatrix(temp2);
+    covariance2 = t[0];
+    mean2 = t[1];
+
+    var s1 = Math.sqrt(tracking.Matrix.norm(covariance1));
+    var s2 = Math.sqrt(tracking.Matrix.norm(covariance2));
+
+    var scale = s1/s2;
+    temp1 = tracking.Matrix.mulScalar(1.0/s1, temp1);
+    temp2 = tracking.Matrix.mulScalar(1.0/s2, temp2);
+
+    var num = 0, den = 0;
+    for (var i = 0; i < shape1.length; i++) {
+      num = num + temp1[i][1] * temp2[i][0] - temp1[i][0] * temp2[i][1];
+      den = den + temp1[i][0] * temp2[i][0] + temp1[i][1] * temp2[i][1];
+    }
+
+    var norm = Math.sqrt(num*num + den*den);
+    var sin_theta = num/norm;
+    var cos_theta = den/norm;
+    var rotation = [
+      [cos_theta, -sin_theta],
+      [sin_theta, cos_theta]
+    ];
+
+    return [rotation, scale];
+  }
+
+  /**
+   * LBF Random Forest data structure.
+   * @static
+   * @constructor
+   */
+  tracking.LBF.RandomForest = function(forestIndex){
+    this.maxNumTrees = tracking.LBF.RegressorData[forestIndex].max_numtrees;
+    this.landmarkNum = tracking.LBF.RegressorData[forestIndex].num_landmark;
+    this.maxDepth = tracking.LBF.RegressorData[forestIndex].max_depth;
+    this.stages = tracking.LBF.RegressorData[forestIndex].stages; 
+
+    this.rfs = new Array(this.landmarkNum);
+    for(var i=0; i < this.landmarkNum; i++){
+      this.rfs[i] = new Array(this.maxNumTrees);
+      for(var j=0; j < this.maxNumTrees; j++){
+        this.rfs[i][j] = new tracking.LBF.Tree(forestIndex, i, j);
+      }
+    }
+  }
+
+  /**
+   * LBF Tree data structure.
+   * @static
+   * @constructor
+   */
+  tracking.LBF.Tree = function(forestIndex, landmarkIndex, treeIndex){
+    var data = tracking.LBF.RegressorData[forestIndex].landmarks[landmarkIndex][treeIndex];
+    this.maxDepth = data.max_depth;
+    this.maxNumNodes = data.max_numnodes;
+    this.nodes = data.nodes;
+    this.landmarkID = data.landmark_id;
+    this.numLeafnodes = data.num_leafnodes;
+    this.numNodes = data.num_nodes;
+    this.maxNumFeats = data.max_numfeats;
+    this.maxRadioRadius = data.max_radio_radius;
+    this.leafnodes = data.id_leafnodes;
+  }
+
+}());

+ 107 - 255
src/components/face/icollect.vue

@@ -1,271 +1,123 @@
 <template>
-  <div class="see">
-    <video
-      ref="myVideo"
-      muted
-      loop
-      playsinline
-      @loadedmetadata="fnRun"
-    ></video>
-    <canvas ref="myCanvas" />
+  <div>
+    <div class="video-box">
+      <video id="video" width="320" height="240" preload autoplay loop muted></video>
+      <canvas id="canvas" width="320" height="240"></canvas>
+    </div>
+    <canvas id="screenshotCanvas" width="320" height="240"></canvas>
+    <div class="switch-button">
+      <el-row>
+        <el-button type="primary" @click="destroyed">关闭摄像头</el-button>
+        <el-button type="primary" @click="init">开始识别</el-button>
+      </el-row>
+    </div>
   </div>
 </template>
 
 <script>
-import * as faceapi from "face-api.js";
-export default {
-  name: "WebRTCFaceRecognition",
-  data() {
-    return {
-      nets: "ssdMobilenetv1", // model
-      options: null, // model options
-      withBoxes: true, // boxes or contours
-      detectFace: "detectSingleFace", // single or multiple faces
-      detection: "landmark",
-      videoEl: null,
-      canvasEl: null,
-      timeout: 0,
-      // Video media constraints
-      constraints: {
-        audio: false,
-        video: {
-          // ideal (the value the browser should aim for)
-          width: {
-            min: 320,
-            ideal: 320,
-            max: 320,
-          },
-          height: {
-            min: 240,
-            ideal: 240,
-            max: 240,
-          },
-          // frameRate: on bandwidth-constrained links a lower frame rate may be preferable
-          frameRate: {
-            min: 15,
-            ideal: 30,
-            max: 60,
-          },
-          // facing mode: front or rear camera
-          facingMode: "environment",
-        },
-      },
-    };
-  },
-  // watch: {
-  //   detection(val) {
-  //     this.detection = val;
-  //     this.videoEl.pause();
-  //     setTimeout(() => {
-  //       this.videoEl.play();
-  //       setTimeout(() => this.fnRun(), 300);
-  //     }, 300);
-  //   },
-  // },
-  mounted() {
-    console.log("mounted")
-    this.$nextTick(() => {
-      this.fnInit();
-    });
-  },
-  methods: {
-    // Initialize and load the models
-    async fnInit() {
-      console.log("fnInit")
-       await faceapi.nets[this.nets].loadFromUri("/static/models"); // detection model
-      console.log("loadFromUri")
-       await faceapi.loadFaceLandmarkModel("/static/models"); // landmark model
-      console.log("loadFaceLandmarkModel")
-      // await faceapi.loadFaceExpressionModel("/models"); // expression model
-      // await faceapi.loadAgeGenderModel("/models"); // age & gender model
-      // Tune the detection results via the model options
-      this.options = new faceapi.SsdMobilenetv1Options({
-        minConfidence: 0.5, // 0.1 ~ 0.9
-      });
-      console.log("options", this.options)
-      // Cache the DOM nodes as properties
-      this.videoEl = this.$refs["myVideo"]
-      // document.getElementById("myVideo");
-      this.canvasEl = this.$refs["myCanvas"]
-      // document.getElementById("myCanvas");
-      setTimeout(() => this.fnOpen(), 1000);
-      console.log("init", this.canvasEl, this.videoEl )
-    },
-    // Detect and draw face landmarks
-    async fnRunFaceLandmark() {
-      console.log("RunFaceLandmark");
-      console.log("paused", this.videoEl.paused)
-      if (this.videoEl.paused) return clearTimeout(this.timeout);
-      console.log("paused", this.videoEl.paused)
-      // Detect and draw the face data
-      const result = await faceapi[this.detectFace](
-        this.videoEl,
-        this.options
-      ).withFaceLandmarks();
+  import tracking from '@/assets/tracking/build/tracking-min.js';
+  import '@/assets/tracking/build/data/face-min.js';
 
-      console.log("result", result)
-      if (result && !this.videoEl.paused) {
-        const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
-        const resizeResult = faceapi.resizeResults(result, dims);
-        this.withBoxes
-          ? faceapi.draw.drawDetections(this.canvasEl, resizeResult)
-          : faceapi.draw.drawFaceLandmarks(this.canvasEl, resizeResult);
-      } else {
-        this.canvasEl
-          .getContext("2d")
-          .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
+  export default {
+    data() {
+      return {
+        trackerTask: null,
+        mediaStreamTrack: null,
+        video: null,
+        screenshotCanvas: null,
+        uploadLock: true // upload lock (prevents concurrent uploads)
       }
-      this.timeout = setTimeout(() => this.fnRunFaceLandmark());
     },
-    // Detect and draw facial expressions
-    async fnRunFaceExpression() {
-      if (this.videoEl.paused) return clearTimeout(this.timeout);
-      // Detect and draw the face data
-      const result = await faceapi[this.detectFace](this.videoEl, this.options)
-        .withFaceLandmarks()
-        .withFaceExpressions();
-      if (result && !this.videoEl.paused) {
-        const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
-        const resizeResult = faceapi.resizeResults(result, dims);
-        this.withBoxes
-          ? faceapi.draw.drawDetections(this.canvasEl, resizeResult)
-          : faceapi.draw.drawFaceLandmarks(this.canvasEl, resizeResult);
-        faceapi.draw.drawFaceExpressions(this.canvasEl, resizeResult, 0.05);
-      } else {
-        this.canvasEl
-          .getContext("2d")
-          .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
-      }
-      this.timeout = setTimeout(() => this.fnRunFaceExpression());
+    mounted() {
+      this.init();
     },
-    // Detect and draw age and gender
-    async fnRunFaceAgeAndGender() {
-      if (this.videoEl.paused) return clearTimeout(this.timeout);
-      // Detect and draw the face data
-      const result = await faceapi[this.detectFace](this.videoEl, this.options)
-        .withFaceLandmarks()
-        .withAgeAndGender();
-      if (result && !this.videoEl.paused) {
-        const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
-        const resizeResults = faceapi.resizeResults(result, dims);
-        this.withBoxes
-          ? faceapi.draw.drawDetections(this.canvasEl, resizeResults)
-          : faceapi.draw.drawFaceLandmarks(this.canvasEl, resizeResults);
-        if (Array.isArray(resizeResults)) {
-          resizeResults.forEach((result) => {
-            const { age, gender, genderProbability } = result;
-            new faceapi.draw.DrawTextField(
-              [
-                `${Math.round(age, 0)} years`,
-                `${gender} (${Math.round(genderProbability)})`,
-              ],
-              result.detection.box.bottomLeft
-            ).draw(this.canvasEl);
+    methods: {
+      // Initial setup
+      init() {
+        this.video = this.mediaStreamTrack = document.getElementById('video');
+        this.screenshotCanvas = document.getElementById('screenshotCanvas');
+
+        let canvas = document.getElementById('canvas');
+        let context = canvas.getContext('2d');
+
+        // Standard tracking.js setup
+        let tracker = new window.tracking.ObjectTracker('face');
+        tracker.setInitialScale(4);
+        tracker.setStepSize(2);
+        tracker.setEdgesDensity(0.1);
+        // Initialize the camera
+        this.trackerTask = window.tracking.track('#video', tracker, {
+          camera: true
+        });
+
+        let _this = this;
+        tracker.on('track', function(event) {
+
+          // Faces detected: draw a box at each face position
+          context.clearRect(0, 0, canvas.width, canvas.height);
+          event.data.forEach(function(rect) {
+            context.strokeStyle = '#0764B7';
+            context.strokeRect(rect.x, rect.y, rect.width, rect.height);
           });
-        } else {
-          const { age, gender, genderProbability } = resizeResults;
-          new faceapi.draw.DrawTextField(
-            [
-              `${Math.round(age, 0)} years`,
-              `${gender} (${Math.round(genderProbability)})`,
-            ],
-            resizeResults.detection.box.bottomLeft
-          ).draw(this.canvasEl);
+          // event.data.length equals the number of faces detected
+          if(_this.uploadLock && event.data.length){
+            // upload the image
+              _this.screenshotAndUpload();
+          }
+        });
+      },
+      // Capture a frame and upload it
+      screenshotAndUpload() {
+        // Take the lock so concurrent requests are not fired
+        this.uploadLock = false;
+
+        // Draw the current video frame and convert it to a base64 JPEG
+        let canvas = this.screenshotCanvas;
+        let video = this.video;
+        let ctx = canvas.getContext('2d');
+        ctx.clearRect(0, 0, canvas.width, canvas.height);
+        ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
+        let base64Img = canvas.toDataURL('image/jpeg');
+
+        // Log the base64 payload size
+        console.log('base64Img', base64Img.length)
+
+        // Release the lock; in a real upload this belongs in the request callback
+        this.uploadLock = true;
+      },
+      // Stop the camera (bound to the "close camera" button, not a lifecycle hook)
+      destroyed(){
+        if(!this.mediaStreamTrack || !this.mediaStreamTrack.srcObject){
+          return
         }
-      } else {
-        this.canvasEl
-          .getContext("2d")
-          .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
+        // Stop every track, not just the first one, then end the tracker task
+        this.mediaStreamTrack.srcObject.getTracks().forEach((track) => track.stop());
+        this.mediaStreamTrack.srcObject = null;
+        this.trackerTask.stop()
       }
-      this.timeout = setTimeout(() => this.fnRunFaceAgeAndGender());
-    },
-    // Dispatch to the selected detection type
-    fnRun() {
-      if (this.detection === "landmark") {
-        this.fnRunFaceLandmark();
-        return;
-      }
-      if (this.detection === "expression") {
-        this.fnRunFaceExpression();
-        return;
-      }
-      if (this.detection === "age_gender") {
-        this.fnRunFaceAgeAndGender();
-        return;
-      }
-    },
-    // Start the camera media stream
-    fnOpen() {
-      if (typeof window.stream === "object") return;
-      clearTimeout(this.timeout);
-      this.timeout = setTimeout(() => {
-        clearTimeout(this.timeout);
-        navigator.mediaDevices
-          .getUserMedia({
-            video: true,
-          })
-          .then(this.fnSuccess)
-          .catch(this.fnError);
-      }, 300);
-    },
-    // Media stream started successfully
-    fnSuccess(stream) {
-      console.log("stream", stream)
-      window.stream = stream; // expose the stream to the browser console
-      this.videoEl.srcObject = stream;
-      console.log("this.videoEl", this.videoEl )
-      this.videoEl.play();
-    },
-    // Failed to start the media stream
-    fnError(error) {
-      console.log(error);
-      alert("视频媒体流获取错误" + error);
-    },
-    // Stop the camera media stream
-    fnClose() {
-      this.canvasEl
-        .getContext("2d")
-        .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
-      this.videoEl.pause();
-      clearTimeout(this.timeout);
-      if (typeof window.stream === "object") {
-        window.stream.getTracks().forEach((track) => track.stop());
-        window.stream = "";
-        this.videoEl.srcObject = null;
-      }
-    },
-  },
-  beforeDestroy() {
-    this.fnClose();
-  },
-};
+    }
+  }
 </script>
 
 <style scoped>
-button {
-  height: 30px;
-  border: 2px #42b983 solid;
-  border-radius: 4px;
-  background: #42b983;
-  color: white;
-  margin: 10px;
-}
-.see {
-  position: relative;
-}
-.see canvas {
-  position: absolute;
-  top: 0;
-  left: 0;
-}
-.option {
-  padding-bottom: 20px;
-}
-.option div {
-  padding: 10px;
-  border-bottom: 2px #42b983 solid;
-}
-.option div label {
-  margin-right: 20px;
-}
+  /* Screenshot canvas is for drawing only; keep it hidden */
+  #screenshotCanvas{
+    display: none;
+  }
+
+  .video-box{
+    position: relative;
+    margin-left: 30px;
+    width: 320px;
+    height: 240px;
+  }
+
+  .switch-button{
+    margin-top: 30px;
+    margin-left: 30px;
+  }
+  video,canvas{
+    position: absolute;
+    top: 0;
+    left: 0;
+    border: 5px solid #000;
+  }
 </style>

+ 263 - 0
src/components/message/iMessage.vue

@@ -0,0 +1,263 @@
+<template>
+  <div class="message">
+    <div class="input-msg">
+      <textarea name="" id="msg" cols="50" rows="5" v-model.trim="title" @keyup.enter="sendMsg"
+        :placeholder="refer"></textarea>
+      <el-button @click="sendMsg" type="primary" size="medium"> 发送信息 </el-button>
+    </div>
+    <div class="history">
+      <div class="msg-item" v-for="msg in msgList" :key="msg.id">
+        <div>
+          <h3>用户:{{msg.author}}</h3>
+          <p><small>{{msg.createTime | formatTime}}</small></p>
+          <p class="cont">{{msg.title}}</p>
+
+          <template v-if="msg.replyTime>0 && msg.reply!=''">
+            <h3 style="color: red;">官方答复</h3>
+            <p><small>{{msg.replyTime | formatTime}}</small></p>
+            <p class="cont">{{msg.reply}}</p>
+          </template>
+
+          <p class="control">
+            <el-button @click="replyMsg(msg)" type="primary" size="mini"> 回复 </el-button>
+            <el-button v-if="msg.userId==userInfo.uid" @click="delMsg(msg.postId)" type="danger" size="mini"> 删除  </el-button>
+          </p>
+        </div>
+      </div>
+    </div>
+  </div>
+</template>
+
+<script>
+  import {
+    httpServer
+  } from "@/components/httpServer/httpServer.js";
+  import { mapGetters } from "vuex";
+  import { MessageBox } from "element-ui";
+  export default {
+    created() {
+      this.loadData()
+    },
+    data() {
+      return {
+        page: 1,
+        size: 6,
+        total: 0,
+        refer:'',
+        title: "",
+        msgList: [],
+
+      }
+    },
+    watch: {
+      mediaId(){
+        this.page = 1
+        this.loadData()
+      }
+    },
+    computed: {
+      ...mapGetters("user", ["userInfo"])
+    },
+    props:['mediaId', "id"],
+    methods: {
+      loadData(){
+        if( !this.mediaId ) return;
+        let param = {
+          mediaId: this.mediaId,
+          page: this.page,
+          size: this.size
+        }
+        httpServer( "Course.GetUserPostMediaList", param).then(res=>{
+          if( res.code != 200) return;
+          this.total = res.data.total;
+          this.msgList = res.data.list||[];
+        })
+      },
+      sendMsg() {
+        if (this.title === "") {
+          alert("留言内容为空")
+          return
+        }
+        let title = this.refer + this.title
+        let param = {title, mediaId: this.mediaId};
+        httpServer( "Course.AddPostMedia", param).then(res=>{
+          if( res.code != 200) return;
+          this.title = '';
+          this.refer = '';
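+          // Optimistic update: append the new post locally so the list
+          // refreshes without another round trip to the server.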
+          param.userId = this.userInfo.uid;
+          param.author = this.userInfo.nickname;
+          param.createTime = parseInt(Date.now()/1000)
+          this.msgList.push( param )
+          this.total += 1
+        })
+      },
+      doDelMsg(postId){
+        httpServer("Course.DeletePostMedia", {postId}).then(res=>{
+          if( res.code != 200) return;
+          this.loadData()
+        })
+      },
+      replyMsg(msg){
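+        // If the quoted message is itself a reply ("回复 @user:..."), quote
+        // only the original text that follows the first line break.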
+        let title = msg.title||''
+        if( title.substr(0,2) == '回复'){
+          title = title.split('\n')[1];
+        }
+        this.refer = `回复 @${msg.author}:${title}\n`
+      },
+      delMsg(postId) {
+        let that = this
+        MessageBox({
+          title: "删除信息",
+          message: "删除之后不可恢复,是否确认删除!!",
+          showCancelButton: true,
+          confirmButtonText: "确定",
+          cancelButtonText: "取消",
+          beforeClose: (action, instance, done) => {
+            if (action === "confirm") {
+              instance.confirmButtonLoading = true;
+              instance.confirmButtonText = '提交中...';
+              that.doDelMsg(postId);
+              done();
+              instance.confirmButtonLoading = false;
+            } else {
+              done();
+            }
+          }
+        })
+      },
+    },
+    filters: {
+      formatTime: value => {
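+        // createTime/replyTime are Unix timestamps in seconds, hence the *1000.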
+        const date = new Date(value*1000);
+        return `${date.getFullYear()}年${(date.getMonth()+1).toString().padStart(2,'0')}月${date.getDate().toString().padStart(2,'0')}日 ${date.getHours().toString().padStart(2,'0')}时${date.getMinutes().toString().padStart(2,'0')}分${date.getSeconds().toString().padStart(2,'0')}秒`
+      }
+    }
+  }
+</script>
+<!-- The scoped attribute keeps these styles local to this component -->
+<style scoped>
+  .input-msg {
+    width: 80%;
+    margin: 10px auto;
+    display: flex;
+    flex-direction: column;
+    align-items: flex-end;
+  }
+
+  #msg {
+    width: 100%;
+    height: 60px;
+    padding: 10px;
+    outline: none;
+    border: 1px solid rgba(219, 73, 73, 0.466);
+    border-radius: 5px;
+    resize: none;
+    display: block;
+    margin: 10px auto;
+    font-size: 18px;
+  }
+
+  .send-msg {
+    width: 200px;
+    height: 40px;
+    border: none;
+    background-color: orangered;
+    color: wheat;
+    border-radius: 20px;
+    cursor: pointer;
+    font-size: 18px;
+  }
+
+  .send-msg:hover {
+    background-color: rgb(34, 231, 109);
+    color: #000;
+  }
+
+  .history {
+    width: 80%;
+    padding: 20px 10px;
+    margin: 10px auto;
+    background-color: #fff;
+  }
+
+  .msg-item {
+    display: flex;
+    padding: 10px;
+    border-bottom: 1px dashed #888;
+  }
+
+/*  .msg-item img {
+    width: 100px;
+    height: 100px;
+    border-radius: 10px;
+  } */
+
+  .msg-item div {
+    margin-left: 10px;
+    width: 100%;
+  }
+
+  .msg-item div h2 {
+    font-size: 22px;
+  }
+
+  .msg-item div h2 small {
+    font-size: 16px;
+    color: #888;
+    font-weight: 600;
+    margin-left: 20px;
+  }
+
+  .msg-item div p.cont {
+    font-size: 16px;
+    color: #444;
+    margin: 10px 0;
+    word-wrap: break-word;
+    white-space: pre-wrap;
+    min-height: 30px;
+  }
+
+  .control {
+    width: 100%;
+    display: flex;
+    justify-content: flex-end;
+  }
+  .reply{
+    width: 100%;
+    display: flex;
+    justify-content: flex-end;
+  }
+  .btn {
+    width: 80px;
+    height: 30px;
+    border: 1px solid #888;
+    border-radius: 15px;
+    cursor: pointer;
+  }
+
+  .btn-edit {
+    background-color: rgb(20, 187, 247);
+  }
+
+  .btn-edit:hover {
+    background-color: rgb(17, 148, 196);
+    color: #fff;
+  }
+
+  .btn-del {
+    background-color: rgb(247, 20, 88);
+  }
+
+  .btn-del:hover {
+    background-color: rgb(192, 21, 72);
+    color: #fff;
+  }
+</style>

+ 117 - 2
src/containers/center/face/index.vue

@@ -1,8 +1,123 @@
 <template>
+  <div>
+    <div class="video-box">
+      <video id="video" width="320" height="240" preload autoplay loop muted></video>
+      <canvas id="canvas" width="320" height="240"></canvas>
+    </div>
+    <canvas id="screenshotCanvas" width="320" height="240"></canvas>
+    <div class="switch-button">
+      <el-row>
+        <el-button type="primary" @click="destroyed">关闭摄像头</el-button>
+        <el-button type="primary" @click="init">开始识别</el-button>
+      </el-row>
+    </div>
+  </div>
 </template>
 
 <script>
+  import '@/assets/tracking/build/tracking-min.js'; // attaches window.tracking
+  import '@/assets/tracking/build/data/face-min.js';
+
+  export default {
+    data() {
+      return {
+        trackerTask: null,
+        mediaStreamTrack: null,
+        video: null,
+        screenshotCanvas: null,
+        uploadLock: true // upload lock: true means idle and ready to upload
+      }
+    },
+    mounted() {
+      this.init();
+    },
+    methods: {
+      // Initial setup
+      init() {
+        this.video = this.mediaStreamTrack = document.getElementById('video');
+        this.screenshotCanvas = document.getElementById('screenshotCanvas');
+
+        let canvas = document.getElementById('canvas');
+        let context = canvas.getContext('2d');
+
+        // Standard tracking.js setup: create a face ObjectTracker and tune it
+        let tracker = new window.tracking.ObjectTracker('face');
+        tracker.setInitialScale(4);
+        tracker.setStepSize(2);
+        tracker.setEdgesDensity(0.1);
+        // Initialize the camera and start tracking on the <video> element
+        this.trackerTask = window.tracking.track('#video', tracker, {
+          camera: true
+        });
+
+        let _this = this;
+        tracker.on('track', function(event) {
+
+          // Clear the canvas, then outline every detected face
+          context.clearRect(0, 0, canvas.width, canvas.height);
+          event.data.forEach(function(rect) {
+            context.strokeStyle = '#0764B7';
+            context.strokeRect(rect.x, rect.y, rect.width, rect.height);
+          });
+          // event.data.length is the number of faces detected in this frame
+          if (_this.uploadLock && event.data.length) {
+            // Upload a screenshot of the current frame
+            _this.screenshotAndUpload();
+          }
+        });
+      },
+      // Capture a frame and upload it
+      screenshotAndUpload() {
+        // Take the lock so concurrent requests are not fired
+        this.uploadLock = false;
+
+        // Draw the current video frame and convert it to a base64 JPEG
+        let canvas = this.screenshotCanvas;
+        let video = this.video;
+        let ctx = canvas.getContext('2d');
+        ctx.clearRect(0, 0, canvas.width, canvas.height);
+        ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
+        let base64Img = canvas.toDataURL('image/jpeg');
+
+        // Log the base64 payload for debugging
+        console.log('base64Img:', base64Img)
+
+        // Release the lock once the upload request succeeds
+        // this.uploadLock = true;
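+        // Hypothetical upload sketch (uploadFaceImage is illustrative, not an
+        // API in this repo); release the lock in the callback so at most one
+        // request is ever in flight:
+        // uploadFaceImage(base64Img).then(() => { this.uploadLock = true; });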
+      },
+      // Stop the camera (bound to the "close camera" button, not a lifecycle hook)
+      destroyed(){
+        if(!this.mediaStreamTrack || !this.mediaStreamTrack.srcObject){
+          return
+        }
+        // Stop every track, not just the first one, then end the tracker task
+        this.mediaStreamTrack.srcObject.getTracks().forEach((track) => track.stop());
+        this.mediaStreamTrack.srcObject = null;
+        this.trackerTask.stop()
+      }
+    }
+  }
 </script>
 
-<style>
-</style>
+<style scoped>
+  /* Screenshot canvas is for drawing only; keep it hidden */
+  #screenshotCanvas{
+    display: none;
+  }
+
+  .video-box{
+    position: relative;
+    margin-left: 30px;
+    width: 320px;
+    height: 240px;
+  }
+
+  .switch-button{
+    margin-top: 30px;
+    margin-left: 30px;
+  }
+  video,canvas{
+    position: absolute;
+    top: 0;
+    left: 0;
+    border: 5px solid #000;
+  }
+</style>

+ 53 - 0
src/containers/wapplayer/index.css

@@ -0,0 +1,53 @@
+.m-chapter-list {
+  position: relative;
+  margin-top: 20px;
+  margin-left: 20px;
+  border-top: 1px solid #f8f8f8;
+}
+.m-chapter-list a {
+  width: 100%;
+  height: 45px;
+  border-bottom: 1px solid #f8f8f8;
+  font-size: 16px;
+  display: block;
+  box-sizing: border-box;
+  padding-left: 6px;
+  position: relative;
+}
+.m-chapter-list .current {
+  border-left: 5px solid #3290d4;
+  background-color: #fff;
+}
+.m-chapter-list .current a {
+  color: #3290d4;
+  width: 400px;
+  background-color: #fff;
+  border-right: 2px solid #fff;
+}
+
+.m-chapter-list .current .sub-list {
+  display: block;
+}
+.m-chapter-list .current .sub-list a {
+  color: #666;
+  font-size: 14px;
+  line-height: 30px;
+}
+.m-chapter-list .current .sub-list a:before {
+  content: "";
+  border: 1px solid;
+  width: 8px;
+  border-color: transparent transparent #ddd #ddd;
+  display: inline-block;
+  margin-right: 5px;
+  position: relative;
+  top: -3px;
+}
+.m-chapter-list .current .sub-list a:hover,
+.m-chapter-list .current .sub-list a.current {
+  color: #3290d4;
+  border-left: 0;
+}
+.m-chapter-list .sub-list {
+  display: none;
+}

+ 595 - 0
src/containers/wapplayer/index.vue

@@ -0,0 +1,595 @@
+<template>
+  <div>
+    <h2 class="tc">
+      <span>{{curTimes|useTime}}</span>
+      <strong>/</strong>
+      <span>{{media.duration|useTime}}</span>
+    </h2>
+    <el-row>
+      <el-col :lg="12">
+
+        <div>
+          <video-player id="myVideo" class="video-player-box" ref="videoPlayer" @pause="onPlayerPause($event)"
+            @play="onPlayerStart($event)" @ready="playerReadied" @timeupdate="onPlayerTimeupdate($event)"
+            @ended="onPlayerEnded($event)" :globalOptions="{controls:true}" :options="options">
+          </video-player>
+
+        </div>
+
+        <div class="tc">
+          <p v-if="errMsg" style="font-size: 30px;color: red;"> {{errMsg}}</p>
+        </div>
+
+      </el-col>
+
+      <el-col :lg="12">
+        <div class="account-tit2">
+          <a :class="{'current':required===1}" @click="required=1" style="width: 30%;">必修课程</a>
+          <a :class="{'current':required===0}" @click="required=0" style="width: 30%;">选修课程</a>
+          <a :class="{'current':required===2}" @click="required=2" style="width: 30%;">讨论区</a>
+        </div>
+
+
+        <el-menu v-if="required==1" style="min-height: 400px;font-size: 10px;">
+          <ul class="m-chapter-list">
+            <li v-for="(item, index) in chapter.required" :key="index" :class="{'current':item.name==activeChapter}">
+              <a href="javascript:void(0)" style="text-decoration: none" @click="goState(item)" class="ng-binding">
+                {{parseInt(item.xs/10)}}学时 {{item.name}}</a>
+
+              <div class="sub-list" v-if="item.name==activeChapter">
+                <a v-for="(subItem,index)  in list" :key="subItem.id" v-if="subItem.chapterName == activeChapter"
+                  @click="goSubState(subItem, index)" :class="{'current':subItem.name==activeName}">
+                  <span class="media-process">
+                    <el-progress :percentage="subItem.percent" type="circle" :width="16" :height="16"
+                      :format="()=>{return ''}" v-if="subItem.percent>=100" color="green"></el-progress>
+                    <el-progress :percentage="subItem.percent" type="circle" :width="16" :height="16"
+                      :format="()=>{return ''}" v-else-if="subItem.percent>=50" color="cyan"></el-progress>
+                    <el-progress :percentage="subItem.percent" type="circle" :width="16" :height="16"
+                      :format="()=>{return ''}" v-else></el-progress>
+                  </span>
+                  <span class="media-name"> {{subItem.name}} </span>
+                </a>
+              </div>
+            </li>
+          </ul>
+        </el-menu>
+
+        <el-menu v-if="required==0">
+          <ul class="m-chapter-list" style="width: 400px;font-size: 10px;">
+            <li v-for="(item, index) in chapter.normal" :key="index" :class="{'current':item.name==activeChapter}">
+              <a href="javascript:void(0)" style="text-decoration: none" @click="goState(item)"
+                class="ng-binding">{{parseInt(item.xs/10)}}学时 {{item.name}}</a>
+
+              <div class="sub-list pt10" v-if="item.name==activeChapter">
+                <a v-for="(subItem,index) in list" :key="subItem.id" v-if="subItem.chapterName == activeChapter"
+                  @click="goSubState(subItem, index)" :class="{'current':subItem.name==activeName}">
+                  <span class="media-process">
+                    <el-progress :percentage="subItem.percent" type="circle" :width="16" :height="16"
+                      :format="()=>{return ''}" v-if="subItem.percent>=100" color="green"></el-progress>
+                    <el-progress :percentage="subItem.percent" type="circle" :width="16" :height="16"
+                      :format="()=>{return ''}" v-else-if="subItem.percent>=50" color="cyan"></el-progress>
+                    <el-progress :percentage="subItem.percent" type="circle" :width="16" :height="16"
+                      :format="()=>{return ''}" v-else></el-progress>
+                  </span>
+                  <span class="media-name"> {{subItem.name}} </span>
+                </a>
+              </div>
+            </li>
+          </ul>
+          <!-- <el-menu-item v-for="item in chapter.normal" :key="item">{{item}}</el-menu-item> -->
+        </el-menu>
+
+        <div v-if="required==2">
+          <i-message :media-id="media.mediaId"></i-message>
+        </div>
+
+      </el-col>
+
+    </el-row>
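+    <!-- v-drag is assumed to be a drag directive registered globally elsewhere -->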
+    <div class="left-float" v-if="!closeFace" v-drag v-show="identifyFacePass">
+      <video ref="video" width="240" height="180" autoplay></video>
+      <canvas ref="canvas" v-show="ontakebtn" width="240" height="180"></canvas>
+    </div>
+
+    <el-dialog title="人脸认证" center :visible.sync="identifyFace" width="500px" :close-on-click-modal="false">
+      <div style="width: 240px;margin: 100px auto;">
+        <video ref="video2" width="240" height="180" autoplay></video>
+        <p style="margin-top: 20px;">当前照片:</p>
+        <canvas ref="canvas" width="240" height="180"></canvas>
+        <p v-if="errMsg" style="font-size: 30px;color: red;"> {{errMsg}}</p>
+      </div>
+    </el-dialog>
+  </div>
+</template>
+
+<script>
+  import {
+    httpServer
+  } from "@/components/httpServer/httpServer.js";
+  import md5 from 'js-md5';
+  import {
+    videoPlayer
+  } from 'vue-video-player';
+  import 'video.js/dist/video-js.css'
+  import IMessage from '@/components/message/iMessage.vue'
+  // import html2canvas from "html2canvas";
+  import {
+    MessageBox
+  } from "element-ui";
+  export default {
+    name: "Index",
+    data() {
+      return {
+        timer: false,
+        required: 1,
+        tickNum: 0,
+        prevTime: 0,
+        isReady: false,
+        ontakebtn: false,
+        identifyFace: false,
+        identifyFacePass: false,
+        activeChapter: '',
+        activeName: '',
+        curTimes: 0,
+        errMsg: '',
+        errCount: 0,
+        onPlay: false,
+        media: {},
+        options: {
+          autoplay: false, // if true, playback starts as soon as the browser is ready
+          muted: false, // if true, all audio is silenced by default
+          loop: false, // if true, the video restarts as soon as it ends
+          preload: "auto", // hint to start downloading video data right after the element loads (when supported)
+          language: 'zh-CN',
+          aspectRatio: '4:3', // puts the player in fluid mode and uses this ratio (two numbers separated by a colon, e.g. "16:9" or "4:3") when sizing it
+          fluid: true, // when true, the player scales to fit its container
+          sources: [{
+            src: 'https://media.ndjsxh.cn:18443/hls/20402/20402.m3u8',
+            type: "application/x-mpegURL"
+          }],
+          poster: '',
+          notSupportedMessage: '无法播放媒体源', // overrides the default Video.js message shown when the source cannot be played
+          playtimes: ""
+        },
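+        // Tick bookkeeping: a progress heartbeat is sent every `heartbeat`
+        // ticks (~5s), playback aborts after `maxErrorCount` failed face
+        // checks, and a face sample is collected every `collectBeat`
+        // heartbeats (~10min).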
+        heartbeat: 5,
+        maxErrorCount: 20,
+        collectBeat: 120,
+        closeFace: true,
+        chapter: { required: [], normal: [] },
+        list: [],
+      }
+    },
+    components: {
+      videoPlayer,
+      IMessage
+    },
+    filters: {
+      useTime(val) {
+        let timestr = ""
+        let hour = parseInt(val / 3600);
+        let min = parseInt(val / 60 % 60);
+        let sec = parseInt(val % 60);
+        if (hour < 10) hour = "0" + hour;
+        if (min < 10) min = "0" + min;
+        if (sec < 10) sec = "0" + sec;
+        return hour + ":" + min + ":" + sec
+      }
+    },
+    beforeDestroy() {
+      this.stopTick()
+      this.reportErr("play", 'destroy');
+      this.closeCamera("video")
+    },
+    computed: {
+      player() {
+        return this.$refs.videoPlayer.player
+      }
+    },
+    created() {
+      this.startTick()
+      this.startMonitor();
+      this.activeChapter = this.media.chapterName;
+      this.activeName = this.media.name
+    },
+    methods: {
+      photograph(ref) {
+        let identify = (ref == "video2");
+        let ctx = this.$refs["canvas"].getContext("2d");
+        this.ontakebtn = true
+        ctx.drawImage(this.$refs[ref], 0, 0, 240, 180);
+        // Convert to a base64 JPEG (quality 1 = no extra compression)
+        let imgBase64 = this.$refs["canvas"].toDataURL("image/jpeg", 1);
+        this.ontakebtn = false
+        let str = imgBase64.replace("data:image/jpeg;base64,", "");
+        let param = {
+          id: this.media.id,
+          ref,
+          image: str
+        }
+        httpServer("course.collect", param).then(res => {
+          let {
+            msg,
+            pause
+          } = res.data
+          this.errMsg = msg || '';
+          if (msg) {
+            this.errCount++
+          } else {
+            if (identify) {
+              this.identifyPassAndPlay()
+            }
+            this.errCount = 0;
+          }
+          if (!identify && this.errCount > this.maxErrorCount) {
+            this.doPause();
+          }
+        })
+      },
+      destroyTimer() {
+        if (this.timer) clearTimeout(this.timer);
+      },
+      goState(item) {
+        if (item.name == this.activeChapter) {
+          this.activeChapter = ""
+        } else {
+          this.activeChapter = item.name;
+        }
+      },
+      goSubState(item, index) {
+        this.$emit('loadMedia', item, index)
+      },
+      callCamera(ref) {
+        // Use the browser getUserMedia API to open the camera
+        if (this.closeFace) {
+          this.identifyFacePass = true;
+          return;
+        }
+        navigator.mediaDevices
+          .getUserMedia({
+            video: true,
+          })
+          .then((success) => {
+            // Camera opened successfully: attach the stream
+            this.$refs[ref].srcObject = success;
+            // Start playback so frames can be captured in real time
+            this.$refs[ref].play();
+          })
+          .catch((error) => {
+            this.$message.error(
+              "摄像头开启失败,请检查摄像头是否可用!或者打开摄影头"
+            );
+            console.error("摄像头开启失败,请检查摄像头是否可用!");
+          });
+      },
+      closeCamera(ref) {
+        if (!this.$refs[ref]) return;
+        if (!this.$refs[ref].srcObject) return;
+        let stream = this.$refs[ref].srcObject;
+        let tracks = stream.getTracks();
+        tracks.forEach((track) => {
+          track.stop();
+        });
+        this.$refs[ref].srcObject = null;
+      },
+      startTick() {
+        let tick = this.tryTick;
+        this.destroyTimer();
+        this.tickNum = 0;
+        this.timer = setTimeout(tick, 1 * 1000);
+      },
+      stopTick() {
+        if (this.timer) clearTimeout(this.timer);
+      },
+      tryTick() {
+        let that = this;
+        try {
+          that.tick()
+        } catch (err) {
+          that.reportErr("play", '' + err.message)
+        }
+        this.destroyTimer()
+        this.timer = setTimeout(this.tryTick, 1 * 1000);
+      },
+      playerReadied(audio) {
+        let {
+          position,
+          duration
+        } = this.media
+        if (position > 5 && position < duration) {
+          setTimeout(() => {
+            this.setposition(position)
+          }, 2000)
+        }
+        this.isReady = true
+      },
+      onPlayerTimeupdate(player) {
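+        // Anti-skip guard: past the first 30s, a jump of more than 2s ahead
+        // of the last observed time is treated as a seek and snapped back.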
+        let curTimes = player.currentTime();
+        if (curTimes > 30 && curTimes > this.curTimes + 2) {
+          console.log("return", curTimes, this.media.position)
+          player.currentTime(this.curTimes);
+          return;
+        }
+        this.curTimes = curTimes
+      },
+      setposition(position) {
+        if (position > this.media.duration) position = this.media.duration;
+        let player = this.$refs.videoPlayer.player;
+        player.currentTime(position);
+        // player.play()
+        this.curTimes = position;
+        if (this.media.isFinish) return;
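+        // If the saved position is already within two heartbeats of the end,
+        // force a completion heartbeat right away.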
+        if (this.media.position >= this.media.duration - 2 * this.heartbeat && !this.media.isFinish) {
+          this.tick(true)
+        }
+      },
+      onPlayerPause(event) {
+        this.reportErr("play", 'pause');
+        this.onPlay = false
+      },
+      onPlayerEnded(event) {
+        this.reportErr("play", 'end');
+        this.tick(true)
+      },
+      onClose() {
+        this.reportErr("play", 'close')
+        this.doPause()
+        this.$emit("close")
+        this.closeCamera("video")
+      },
+      doPause() {
+        this.onPlay = false
+        let myPlayer = this.$refs.videoPlayer.player;
+        myPlayer && myPlayer.pause()
+      },
+      doPlay() {
+        this.onPlay = true
+        this.startTick();
+        if (!this.$refs.videoPlayer || !this.$refs.videoPlayer.player) return;
+        // if (!this.dialog) return this.doPause();
+        let myPlayer = this.$refs.videoPlayer.player;
+        myPlayer && myPlayer.play()
+        this.tickNum = 0
+      },
+      onPlayerStart(player) {
+        console.log("onPlayerStart")
+        this.onPlay = true
+        if (!this.identifyFacePass) {
+          this.startIdentify()
+        }
+        this.reportErr("play", 'start');
+        this.startTick();
+      },
+      startIdentify() {
+        if (!this.closeFace) {
+          this.identifyFace = true
+          this.identifyFacePass = false
+          this.closeCamera("video")
+          this.callCamera("video2")
+        }
+        this.startTick()
+      },
+      identifyPassAndPlay() {
+        this.identifyFacePass = true
+        this.identifyFace = false;
+        this.closeCamera("video2")
+        this.callCamera("video")
+        this.$message.successMsg("人脸认证通过", 2)
+        this.doPlay()
+      },
+      reportErr(action, msg) {
+        httpServer("course.report", {
+          action,
+          msg
+        })
+      },
+      startMonitor() {
+        let that = this
+        document.addEventListener("visibilitychange", function() {
+          // || document.hidden
+          if (document.visibilityState == "hidden") {
+            // that.doPause( )
+            that.reportErr("play", 'hidden');
+          } else {
+            that.reportErr("play", 'show');
+            // that.doPlay()
+          }
+        });
+      },
+      tick(force = false) {
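+        // Runs once per second via tryTick; `force` pushes a heartbeat even
+        // when paused (used on ended and when resuming near the end).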
+        let media = this.media;
+        this.tickNum++
+        // While face verification is in progress, keep playback paused
+        if (!this.closeFace && !this.identifyFacePass) {
+          if (this.onPlay) {
+            this.doPause()
+          }
+          // Take a verification photo every third tick
+          if (this.tickNum % 3 == 1) {
+            this.photograph("video2");
+          }
+          return;
+        }
+        // Not playing yet (unless this heartbeat is forced)
+        if (!force && !this.onPlay) {
+          return;
+        }
+
+        // Already finished; nothing to report
+        if (this.media.isFinish) {
+          console.log("finish")
+          return;
+        }
+        // One heartbeat every this.heartbeat ticks (~5s)
+        if (this.tickNum % this.heartbeat != 0) {
+          return;
+        }
+        if (this.errCount >= this.maxErrorCount) {
+          this.$message.errorMsg("人脸不在摄像头上", 5);
+          this.destroyTimer()
+          this.$emit("close");
+          return;
+        }
+
+        let heartBeat = parseInt(this.tickNum / this.heartbeat);
+        // After a failed check, re-sample every heartbeat; otherwise only on the collect interval
+        if (!this.closeFace) {
+          if (this.errCount > 0) {
+            this.photograph("video")
+          } else if (heartBeat % this.collectBeat == 1) {
+            this.photograph("video")
+          }
+        }
+        // Read the current playback position
+        let myPlayer = this.$refs.videoPlayer.player;
+        let curTimes = parseInt(myPlayer.currentTime());
+        // Seeking backwards produces no heartbeat
+        if (!force) {
+          if (curTimes < this.media.position + this.heartbeat) {
+            return;
+          }
+        }
+
+        let isFinish = force ? 1 : 0
+        if (curTimes >= media.duration) isFinish = 1;
+        // Unless finishing, only report while actually playing
+        if (!isFinish) {
+          if (!this.onPlay) return;
+        }
+        // Heartbeat payload; isFinish marks (or forces) completion
+        let param = {
+          id: media.id,
+          position: curTimes,
+          isFinish
+        };
+        httpServer("course.tick", param, true).then(res => {
+          if (res.code == 200) {
+            let {
+              skip,
+              position,
+              pause,
+              closed
+            } = res.data
+            if (pause || closed) {
+              this.doPause();
+              this.$emit("close")
+              if (closed) {
+                this.$message.errorMsg("课程关闭学习", 5);
+              } else if (pause) {
+                this.$message.errorMsg("多处同时播放视频", 5);
+              }
+              return
+            }
+            if (!skip) {
+              setTimeout(() => {
+                this.setposition(position)
+              }, 2000);
+            };
+
+            Object.assign(param, res.data)
+            this.$emit("update", param)
+          }
+        })
+      }
+    }
+  }
+</script>
+
+<style>
+  @import url("./index.css");
+
+  /* Plain <style> cannot nest selectors, so these are written flattened */
+  .video-js .vjs-control-bar .vjs-icon-custombutton {
+    font-family: VideoJS;
+    font-weight: normal;
+    font-style: normal;
+  }
+
+  .video-js .vjs-control-bar .vjs-icon-custombutton:before {
+    content: "\f108";
+    font-size: 1.8em;
+    line-height: 1.67;
+  }
+
+  .left-float {
+    width: 240px;
+    height: 180px;
+    background-color: #8bbdf5;
+    position: fixed;
+    transition: bottom ease .9s;
+    z-index: 0;
+    left: 60px;
+    top: 120px;
+    text-align: center;
+    border-radius: 5px;
+  }
+
+  .p-process {
+    width: 100%;
+    margin: 20px auto;
+    height: 30px;
+  }
+
+  .media-footer {
+    padding: 0px 30px;
+    text-align: left;
+    line-height: 40px !important;
+    bottom: -10px;
+  }
+
+  .media-center {
+    text-align: center;
+    padding: 0px;
+  }
+
+  .media-time {
+    font-size: 18px;
+    vertical-align: middle;
+  }
+
+  .media-select {
+    white-space: nowrap;
+    text-align: right;
+    line-height: 40px !important;
+    float: right;
+    margin: 0px !important;
+  }
+
+  .bicon {
+    font-size: 28px !important;
+    padding: 4px !important;
+  }
+
+  .media-el-select {
+    font-size: 28px !important;
+    width: 80px;
+  }
+
+  .media-name {
+    width: 300px;
+    margin-left: 16px;
+    line-height: 24px;
+    white-space: nowrap;
+    overflow: hidden;
+    align-items: center;
+    text-overflow: ellipsis;
+  }
+
+  .media-process {
+    width: 8px;
+    height: 8px;
+    align-items: center;
+  }
+
+  .vjs-tech {
+    pointer-events: none;
+  }
+</style>

+ 1 - 1
src/settings.js

@@ -1,5 +1,5 @@
 module.exports = {
-  title: '福建鸿锵教育科技有限公司'
+  title: '宁德聚仁教育服务有限公司'
   ,beian:'闽ICP备2021006867号'
   ,exam:{
     username:'福建省爱数信息技术有限公司'

Some files were not shown because too many files changed in this diff