diff --git a/docs/demos/eye-tracking-with-webgazer.html b/docs/demos/eye-tracking-with-webgazer.html
index f8fe6855..682b21e7 100644
--- a/docs/demos/eye-tracking-with-webgazer.html
+++ b/docs/demos/eye-tracking-with-webgazer.html
@@ -9,8 +9,8 @@
-
-
+
+
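+      // eve: the publish/subscribe event library bundled with Snap.svg (eve.on, eve.off, eve.once, eve.listeners).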
a; a++)
+ if ("undefined" != typeof this[a]) return this[a];
+ },
+ l = function () {
+ for (var a = this.length; --a; ) if ("undefined" != typeof this[a]) return this[a];
+ },
+ m = Object.prototype.toString,
+ n = String,
+ o =
+ Array.isArray ||
+ function (a) {
+ return a instanceof Array || "[object Array]" == m.call(a);
+ };
+ (eve = function (a, d) {
+ var e,
+ f = c,
+ g = Array.prototype.slice.call(arguments, 2),
+ h = eve.listeners(a),
+ j = 0,
+ m = [],
+ n = {},
+ o = [],
+ p = b;
+ (o.firstDefined = k), (o.lastDefined = l), (b = a), (c = 0);
+ for (var q = 0, r = h.length; r > q; q++)
+ "zIndex" in h[q] && (m.push(h[q].zIndex), h[q].zIndex < 0 && (n[h[q].zIndex] = h[q]));
+ for (m.sort(i); m[j] < 0; ) if (((e = n[m[j++]]), o.push(e.apply(d, g)), c)) return (c = f), o;
+ for (q = 0; r > q; q++)
+ if (((e = h[q]), "zIndex" in e))
+ if (e.zIndex == m[j]) {
+ if ((o.push(e.apply(d, g)), c)) break;
+ do if ((j++, (e = n[m[j]]), e && o.push(e.apply(d, g)), c)) break;
+ while (e);
+ } else n[e.zIndex] = e;
+ else if ((o.push(e.apply(d, g)), c)) break;
+ return (c = f), (b = p), o;
+ }),
+ (eve._events = j),
+ (eve.listeners = function (a) {
+ var b,
+ c,
+ d,
+ e,
+ g,
+ i,
+ k,
+ l,
+ m = o(a) ? a : a.split(f),
+ n = j,
+ p = [n],
+ q = [];
+ for (e = 0, g = m.length; g > e; e++) {
+ for (l = [], i = 0, k = p.length; k > i; i++)
+ for (n = p[i].n, c = [n[m[e]], n[h]], d = 2; d--; )
+ (b = c[d]), b && (l.push(b), (q = q.concat(b.f || [])));
+ p = l;
+ }
+ return q;
+ }),
+ (eve.separator = function (a) {
+ a
+ ? ((a = n(a).replace(/(?=[\.\^\]\[\-])/g, "\\")), (a = "[" + a + "]"), (f = new RegExp(a)))
+ : (f = /[\.\/]/);
+ }),
+ (eve.on = function (a, b) {
+ if ("function" != typeof b) return function () {};
+ for (var c = o(a) ? (o(a[0]) ? a : [a]) : n(a).split(g), d = 0, e = c.length; e > d; d++)
+ !(function (a) {
+ for (var c, d = o(a) ? a : n(a).split(f), e = j, g = 0, h = d.length; h > g; g++)
+ (e = e.n), (e = (e.hasOwnProperty(d[g]) && e[d[g]]) || (e[d[g]] = { n: {} }));
+ for (e.f = e.f || [], g = 0, h = e.f.length; h > g; g++)
+ if (e.f[g] == b) {
+ c = !0;
+ break;
+ }
+ !c && e.f.push(b);
+ })(c[d]);
+ return function (a) {
+ +a == +a && (b.zIndex = +a);
+ };
+ }),
+ (eve.f = function (a) {
+ var b = [].slice.call(arguments, 1);
+ return function () {
+ eve.apply(null, [a, null].concat(b).concat([].slice.call(arguments, 0)));
+ };
+ }),
+ (eve.stop = function () {
+ c = 1;
+ }),
+ (eve.nt = function (a) {
+ var c = o(b) ? b.join(".") : b;
+ return a ? new RegExp("(?:\\.|\\/|^)" + a + "(?:\\.|\\/|$)").test(c) : c;
+ }),
+ (eve.nts = function () {
+ return o(b) ? b : b.split(f);
+ }),
+ (eve.off = eve.unbind =
+ function (a, b) {
+ if (!a) return void (eve._events = j = { n: {} });
+ var c = o(a) ? (o(a[0]) ? a : [a]) : n(a).split(g);
+ if (c.length > 1) for (var d = 0, i = c.length; i > d; d++) eve.off(c[d], b);
+ else {
+ c = o(a) ? a : n(a).split(f);
+ var k,
+ l,
+ m,
+ d,
+ i,
+ p,
+ q,
+ r = [j],
+ s = [];
+ for (d = 0, i = c.length; i > d; d++)
+ for (p = 0; p < r.length; p += m.length - 2) {
+ if (((m = [p, 1]), (k = r[p].n), c[d] != h))
+ k[c[d]] && (m.push(k[c[d]]), s.unshift({ n: k, name: c[d] }));
+ else for (l in k) k[e](l) && (m.push(k[l]), s.unshift({ n: k, name: l }));
+ r.splice.apply(r, m);
+ }
+ for (d = 0, i = r.length; i > d; d++)
+ for (k = r[d]; k.n; ) {
+ if (b) {
+ if (k.f) {
+ for (p = 0, q = k.f.length; q > p; p++)
+ if (k.f[p] == b) {
+ k.f.splice(p, 1);
+ break;
+ }
+ !k.f.length && delete k.f;
+ }
+ for (l in k.n)
+ if (k.n[e](l) && k.n[l].f) {
+ var t = k.n[l].f;
+ for (p = 0, q = t.length; q > p; p++)
+ if (t[p] == b) {
+ t.splice(p, 1);
+ break;
+ }
+ !t.length && delete k.n[l].f;
+ }
+ } else {
+ delete k.f;
+ for (l in k.n) k.n[e](l) && k.n[l].f && delete k.n[l].f;
+ }
+ k = k.n;
+ }
+ a: for (d = 0, i = s.length; i > d; d++) {
+ k = s[d];
+ for (l in k.n[k.name].f) continue a;
+ for (l in k.n[k.name].n) continue a;
+ delete k.n[k.name];
+ }
+ }
+ }),
+ (eve.once = function (a, b) {
+ var c = function () {
+ return eve.off(a, c), b.apply(this, arguments);
+ };
+ return eve.on(a, c);
+ }),
+ (eve.version = d),
+ (eve.toString = function () {
+ return "You are running Eve " + d;
+ }),
+ "undefined" != typeof module && module.exports
+ ? (module.exports = eve)
+ : "function" == typeof define && define.amd
+ ? define("eve", [], function () {
+ return eve;
+ })
+ : (a.eve = eve);
+})(this),
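+  // Snap.svg v0.5.1, wrapped for AMD, CommonJS, or a browser global, with eve as its only dependency.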
+ (function (a, b) {
+ if ("function" == typeof define && define.amd)
+ define(["eve"], function (c) {
+ return b(a, c);
+ });
+ else if ("undefined" != typeof exports) {
+ var c = require("eve");
+ module.exports = b(a, c);
+ } else b(a, a.eve);
+ })(window || this, function (a, b) {
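+    // mina: the animation timing engine (requestAnimationFrame loop plus easing functions such as linear, easein, easeout, elastic, bounce).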
+ var c = (function (b) {
+ var c,
+ d = {},
+ e =
+ a.requestAnimationFrame ||
+ a.webkitRequestAnimationFrame ||
+ a.mozRequestAnimationFrame ||
+ a.oRequestAnimationFrame ||
+ a.msRequestAnimationFrame ||
+ function (a) {
+ return setTimeout(a, 16, new Date().getTime()), !0;
+ },
+ f =
+ Array.isArray ||
+ function (a) {
+ return a instanceof Array || "[object Array]" == Object.prototype.toString.call(a);
+ },
+ g = 0,
+ h = "M" + (+new Date()).toString(36),
+ i = function () {
+ return h + (g++).toString(36);
+ },
+ j =
+ Date.now ||
+ function () {
+ return +new Date();
+ },
+ k = function (a) {
+ var b = this;
+ if (null == a) return b.s;
+ var c = b.s - a;
+ (b.b += b.dur * c), (b.B += b.dur * c), (b.s = a);
+ },
+ l = function (a) {
+ var b = this;
+ return null == a ? b.spd : void (b.spd = a);
+ },
+ m = function (a) {
+ var b = this;
+ return null == a ? b.dur : ((b.s = (b.s * a) / b.dur), void (b.dur = a));
+ },
+ n = function () {
+ var a = this;
+ delete d[a.id], a.update(), b("mina.stop." + a.id, a);
+ },
+ o = function () {
+ var a = this;
+ a.pdif || (delete d[a.id], a.update(), (a.pdif = a.get() - a.b));
+ },
+ p = function () {
+ var a = this;
+ a.pdif && ((a.b = a.get() - a.pdif), delete a.pdif, (d[a.id] = a), r());
+ },
+ q = function () {
+ var a,
+ b = this;
+ if (f(b.start)) {
+ a = [];
+ for (var c = 0, d = b.start.length; d > c; c++)
+ a[c] = +b.start[c] + (b.end[c] - b.start[c]) * b.easing(b.s);
+ } else a = +b.start + (b.end - b.start) * b.easing(b.s);
+ b.set(a);
+ },
+ r = function (a) {
+ if (!a) return void (c || (c = e(r)));
+ var f = 0;
+ for (var g in d)
+ if (d.hasOwnProperty(g)) {
+ var h = d[g],
+ i = h.get();
+ f++,
+ (h.s = (i - h.b) / (h.dur / h.spd)),
+ h.s >= 1 &&
+ (delete d[g],
+ (h.s = 1),
+ f--,
+ (function (a) {
+ setTimeout(function () {
+ b("mina.finish." + a.id, a);
+ });
+ })(h)),
+ h.update();
+ }
+ c = f ? e(r) : !1;
+ },
+ s = function (a, b, c, e, f, g, h) {
+ var j = {
+ id: i(),
+ start: a,
+ end: b,
+ b: c,
+ s: 0,
+ dur: e - c,
+ spd: 1,
+ get: f,
+ set: g,
+ easing: h || s.linear,
+ status: k,
+ speed: l,
+ duration: m,
+ stop: n,
+ pause: o,
+ resume: p,
+ update: q,
+ };
+ d[j.id] = j;
+ var t,
+ u = 0;
+ for (t in d) if (d.hasOwnProperty(t) && (u++, 2 == u)) break;
+ return 1 == u && r(), j;
+ };
+ return (
+ (s.time = j),
+ (s.getById = function (a) {
+ return d[a] || null;
+ }),
+ (s.linear = function (a) {
+ return a;
+ }),
+ (s.easeout = function (a) {
+ return Math.pow(a, 1.7);
+ }),
+ (s.easein = function (a) {
+ return Math.pow(a, 0.48);
+ }),
+ (s.easeinout = function (a) {
+ if (1 == a) return 1;
+ if (0 == a) return 0;
+ var b = 0.48 - a / 1.04,
+ c = Math.sqrt(0.1734 + b * b),
+ d = c - b,
+ e = Math.pow(Math.abs(d), 1 / 3) * (0 > d ? -1 : 1),
+ f = -c - b,
+ g = Math.pow(Math.abs(f), 1 / 3) * (0 > f ? -1 : 1),
+ h = e + g + 0.5;
+ return 3 * (1 - h) * h * h + h * h * h;
+ }),
+ (s.backin = function (a) {
+ if (1 == a) return 1;
+ var b = 1.70158;
+ return a * a * ((b + 1) * a - b);
+ }),
+ (s.backout = function (a) {
+ if (0 == a) return 0;
+ a -= 1;
+ var b = 1.70158;
+ return a * a * ((b + 1) * a + b) + 1;
+ }),
+ (s.elastic = function (a) {
+ return a == !!a
+ ? a
+ : Math.pow(2, -10 * a) * Math.sin(((a - 0.075) * (2 * Math.PI)) / 0.3) + 1;
+ }),
+ (s.bounce = function (a) {
+ var b,
+ c = 7.5625,
+ d = 2.75;
+ return (
+ 1 / d > a
+ ? (b = c * a * a)
+ : 2 / d > a
+ ? ((a -= 1.5 / d), (b = c * a * a + 0.75))
+ : 2.5 / d > a
+ ? ((a -= 2.25 / d), (b = c * a * a + 0.9375))
+ : ((a -= 2.625 / d), (b = c * a * a + 0.984375)),
+ b
+ );
+ }),
+ (a.mina = s),
+ s
+ );
+ })("undefined" == typeof b ? function () {} : b),
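+    // Snap core: Element/Paper/Fragment wrappers, attribute plumbing over eve, color utilities (getRGB, hsb2rgb, hsl2rgb), and path/transform string parsing.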
+ d = (function (a) {
+ function c(a, b) {
+ if (a) {
+ if (a.nodeType) return w(a);
+ if (e(a, "array") && c.set) return c.set.apply(c, a);
+ if (a instanceof s) return a;
+ if (null == b)
+ try {
+ return (a = y.doc.querySelector(String(a))), w(a);
+ } catch (d) {
+ return null;
+ }
+ }
+ return (a = null == a ? "100%" : a), (b = null == b ? "100%" : b), new v(a, b);
+ }
+ function d(a, b) {
+ if (b) {
+ if (
+ ("#text" == a && (a = y.doc.createTextNode(b.text || b["#text"] || "")),
+ "#comment" == a && (a = y.doc.createComment(b.text || b["#text"] || "")),
+ "string" == typeof a && (a = d(a)),
+ "string" == typeof b)
+ )
+ return 1 == a.nodeType
+ ? "xlink:" == b.substring(0, 6)
+ ? a.getAttributeNS(T, b.substring(6))
+ : "xml:" == b.substring(0, 4)
+ ? a.getAttributeNS(U, b.substring(4))
+ : a.getAttribute(b)
+ : "text" == b
+ ? a.nodeValue
+ : null;
+ if (1 == a.nodeType) {
+ for (var c in b)
+ if (b[z](c)) {
+ var e = A(b[c]);
+ e
+ ? "xlink:" == c.substring(0, 6)
+ ? a.setAttributeNS(T, c.substring(6), e)
+ : "xml:" == c.substring(0, 4)
+ ? a.setAttributeNS(U, c.substring(4), e)
+ : a.setAttribute(c, e)
+ : a.removeAttribute(c);
+ }
+ } else "text" in b && (a.nodeValue = b.text);
+ } else a = y.doc.createElementNS(U, a);
+ return a;
+ }
+ function e(a, b) {
+ return (
+ (b = A.prototype.toLowerCase.call(b)),
+ "finite" == b
+ ? isFinite(a)
+ : "array" == b && (a instanceof Array || (Array.isArray && Array.isArray(a)))
+ ? !0
+ : ("null" == b && null === a) ||
+ (b == typeof a && null !== a) ||
+ ("object" == b && a === Object(a)) ||
+ J.call(a).slice(8, -1).toLowerCase() == b
+ );
+ }
+ function f(a) {
+ if ("function" == typeof a || Object(a) !== a) return a;
+ var b = new a.constructor();
+ for (var c in a) a[z](c) && (b[c] = f(a[c]));
+ return b;
+ }
+ function h(a, b) {
+ for (var c = 0, d = a.length; d > c; c++)
+ if (a[c] === b) return a.push(a.splice(c, 1)[0]);
+ }
+ function i(a, b, c) {
+ function d() {
+ var e = Array.prototype.slice.call(arguments, 0),
+ f = e.join("␀"),
+ g = (d.cache = d.cache || {}),
+ i = (d.count = d.count || []);
+ return g[z](f)
+ ? (h(i, f), c ? c(g[f]) : g[f])
+ : (i.length >= 1e3 && delete g[i.shift()],
+ i.push(f),
+ (g[f] = a.apply(b, e)),
+ c ? c(g[f]) : g[f]);
+ }
+ return d;
+ }
+ function j(a, b, c, d, e, f) {
+ if (null == e) {
+ var g = a - c,
+ h = b - d;
+ return g || h ? (180 + (180 * D.atan2(-h, -g)) / H + 360) % 360 : 0;
+ }
+ return j(a, b, e, f) - j(c, d, e, f);
+ }
+ function k(a) {
+ return ((a % 360) * H) / 180;
+ }
+ function l(a) {
+ return ((180 * a) / H) % 360;
+ }
+ function m(a) {
+ var b = [];
+ return (
+ (a = a.replace(/(?:^|\s)(\w+)\(([^)]+)\)/g, function (a, c, d) {
+ return (
+ (d = d.split(/\s*,\s*|\s+/)),
+ "rotate" == c && 1 == d.length && d.push(0, 0),
+ "scale" == c &&
+ (d.length > 2 ? (d = d.slice(0, 2)) : 2 == d.length && d.push(0, 0),
+ 1 == d.length && d.push(d[0], 0, 0)),
+ "skewX" == c
+ ? b.push(["m", 1, 0, D.tan(k(d[0])), 1, 0, 0])
+ : "skewY" == c
+ ? b.push(["m", 1, D.tan(k(d[0])), 0, 1, 0, 0])
+ : b.push([c.charAt(0)].concat(d)),
+ a
+ );
+ })),
+ b
+ );
+ }
+ function n(a, b) {
+ var d = aa(a),
+ e = new c.Matrix();
+ if (d)
+ for (var f = 0, g = d.length; g > f; f++) {
+ var h,
+ i,
+ j,
+ k,
+ l,
+ m = d[f],
+ n = m.length,
+ o = A(m[0]).toLowerCase(),
+ p = m[0] != o,
+ q = p ? e.invert() : 0;
+ "t" == o && 2 == n
+ ? e.translate(m[1], 0)
+ : "t" == o && 3 == n
+ ? p
+ ? ((h = q.x(0, 0)),
+ (i = q.y(0, 0)),
+ (j = q.x(m[1], m[2])),
+ (k = q.y(m[1], m[2])),
+ e.translate(j - h, k - i))
+ : e.translate(m[1], m[2])
+ : "r" == o
+ ? 2 == n
+ ? ((l = l || b), e.rotate(m[1], l.x + l.width / 2, l.y + l.height / 2))
+ : 4 == n &&
+ (p
+ ? ((j = q.x(m[2], m[3])), (k = q.y(m[2], m[3])), e.rotate(m[1], j, k))
+ : e.rotate(m[1], m[2], m[3]))
+ : "s" == o
+ ? 2 == n || 3 == n
+ ? ((l = l || b), e.scale(m[1], m[n - 1], l.x + l.width / 2, l.y + l.height / 2))
+ : 4 == n
+ ? p
+ ? ((j = q.x(m[2], m[3])), (k = q.y(m[2], m[3])), e.scale(m[1], m[1], j, k))
+ : e.scale(m[1], m[1], m[2], m[3])
+ : 5 == n &&
+ (p
+ ? ((j = q.x(m[3], m[4])), (k = q.y(m[3], m[4])), e.scale(m[1], m[2], j, k))
+ : e.scale(m[1], m[2], m[3], m[4]))
+ : "m" == o && 7 == n && e.add(m[1], m[2], m[3], m[4], m[5], m[6]);
+ }
+ return e;
+ }
+ function o(a) {
+ var b =
+ (a.node.ownerSVGElement && w(a.node.ownerSVGElement)) ||
+ (a.node.parentNode && w(a.node.parentNode)) ||
+ c.select("svg") ||
+ c(0, 0),
+ d = b.select("defs"),
+ e = null == d ? !1 : d.node;
+ return e || (e = u("defs", b.node).node), e;
+ }
+ function p(a) {
+ return (a.node.ownerSVGElement && w(a.node.ownerSVGElement)) || c.select("svg");
+ }
+ function q(a, b, c) {
+ function e(a) {
+ if (null == a) return I;
+ if (a == +a) return a;
+ d(j, { width: a });
+ try {
+ return j.getBBox().width;
+ } catch (b) {
+ return 0;
+ }
+ }
+ function f(a) {
+ if (null == a) return I;
+ if (a == +a) return a;
+ d(j, { height: a });
+ try {
+ return j.getBBox().height;
+ } catch (b) {
+ return 0;
+ }
+ }
+ function g(d, e) {
+ null == b
+ ? (i[d] = e(a.attr(d) || 0))
+ : d == b && (i = e(null == c ? a.attr(d) || 0 : c));
+ }
+ var h = p(a).node,
+ i = {},
+ j = h.querySelector(".svg---mgr");
+ switch (
+ (j ||
+ ((j = d("rect")),
+ d(j, { x: -9e9, y: -9e9, width: 10, height: 10, class: "svg---mgr", fill: "none" }),
+ h.appendChild(j)),
+ a.type)
+ ) {
+ case "rect":
+ g("rx", e), g("ry", f);
+ case "image":
+ g("width", e), g("height", f);
+ case "text":
+ g("x", e), g("y", f);
+ break;
+ case "circle":
+ g("cx", e), g("cy", f), g("r", e);
+ break;
+ case "ellipse":
+ g("cx", e), g("cy", f), g("rx", e), g("ry", f);
+ break;
+ case "line":
+ g("x1", e), g("x2", e), g("y1", f), g("y2", f);
+ break;
+ case "marker":
+ g("refX", e), g("markerWidth", e), g("refY", f), g("markerHeight", f);
+ break;
+ case "radialGradient":
+ g("fx", e), g("fy", f);
+ break;
+ case "tspan":
+ g("dx", e), g("dy", f);
+ break;
+ default:
+ g(b, e);
+ }
+ return h.removeChild(j), i;
+ }
+ function r(a) {
+ e(a, "array") || (a = Array.prototype.slice.call(arguments, 0));
+ for (var b = 0, c = 0, d = this.node; this[b]; ) delete this[b++];
+ for (b = 0; b < a.length; b++)
+ "set" == a[b].type
+ ? a[b].forEach(function (a) {
+ d.appendChild(a.node);
+ })
+ : d.appendChild(a[b].node);
+ var f = d.childNodes;
+ for (b = 0; b < f.length; b++) this[c++] = w(f[b]);
+ return this;
+ }
+ function s(a) {
+ if (a.snap in V) return V[a.snap];
+ var b;
+ try {
+ b = a.ownerSVGElement;
+ } catch (c) {}
+ (this.node = a), b && (this.paper = new v(b)), (this.type = a.tagName || a.nodeName);
+ var d = (this.id = S(this));
+ if (
+ ((this.anims = {}),
+ (this._ = { transform: [] }),
+ (a.snap = d),
+ (V[d] = this),
+ "g" == this.type && (this.add = r),
+ this.type in { g: 1, mask: 1, pattern: 1, symbol: 1 })
+ )
+ for (var e in v.prototype) v.prototype[z](e) && (this[e] = v.prototype[e]);
+ }
+ function t(a) {
+ this.node = a;
+ }
+ function u(a, b) {
+ var c = d(a);
+ b.appendChild(c);
+ var e = w(c);
+ return e;
+ }
+ function v(a, b) {
+ var c,
+ e,
+ f,
+ g = v.prototype;
+ if (a && a.tagName && "svg" == a.tagName.toLowerCase()) {
+ if (a.snap in V) return V[a.snap];
+ var h = a.ownerDocument;
+ (c = new s(a)),
+ (e = a.getElementsByTagName("desc")[0]),
+ (f = a.getElementsByTagName("defs")[0]),
+ e ||
+ ((e = d("desc")),
+ e.appendChild(h.createTextNode("Created with Snap")),
+ c.node.appendChild(e)),
+ f || ((f = d("defs")), c.node.appendChild(f)),
+ (c.defs = f);
+ for (var i in g) g[z](i) && (c[i] = g[i]);
+ c.paper = c.root = c;
+ } else
+ (c = u("svg", y.doc.body)), d(c.node, { height: b, version: 1.1, width: a, xmlns: U });
+ return c;
+ }
+ function w(a) {
+ return a
+ ? a instanceof s || a instanceof t
+ ? a
+ : a.tagName && "svg" == a.tagName.toLowerCase()
+ ? new v(a)
+ : a.tagName && "object" == a.tagName.toLowerCase() && "image/svg+xml" == a.type
+ ? new v(a.contentDocument.getElementsByTagName("svg")[0])
+ : new s(a)
+ : a;
+ }
+ function x(a, b) {
+ for (var c = 0, d = a.length; d > c; c++) {
+ var e = { type: a[c].type, attr: a[c].attr() },
+ f = a[c].children();
+ b.push(e), f.length && x(f, (e.childNodes = []));
+ }
+ }
+ (c.version = "0.5.1"),
+ (c.toString = function () {
+ return "Snap v" + this.version;
+ }),
+ (c._ = {});
+ var y = { win: a.window, doc: a.window.document };
+ c._.glob = y;
+ var z = "hasOwnProperty",
+ A = String,
+ B = parseFloat,
+ C = parseInt,
+ D = Math,
+ E = D.max,
+ F = D.min,
+ G = D.abs,
+ H = (D.pow, D.PI),
+ I = (D.round, ""),
+ J = Object.prototype.toString,
+ K =
+ /^\s*((#[a-f\d]{6})|(#[a-f\d]{3})|rgba?\(\s*([\d\.]+%?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+%?(?:\s*,\s*[\d\.]+%?)?)\s*\)|hsba?\(\s*([\d\.]+(?:deg|\xb0|%)?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+(?:%?\s*,\s*[\d\.]+)?%?)\s*\)|hsla?\(\s*([\d\.]+(?:deg|\xb0|%)?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+(?:%?\s*,\s*[\d\.]+)?%?)\s*\))\s*$/i,
+ L = ((c._.separator = /[,\s]+/), /[\s]*,[\s]*/),
+ M = { hs: 1, rg: 1 },
+ N = /([a-z])[\s,]*((-?\d*\.?\d*(?:e[\-+]?\d+)?[\s]*,?[\s]*)+)/gi,
+ O = /([rstm])[\s,]*((-?\d*\.?\d*(?:e[\-+]?\d+)?[\s]*,?[\s]*)+)/gi,
+ P = /(-?\d*\.?\d*(?:e[\-+]?\d+)?)[\s]*,?[\s]*/gi,
+ Q = 0,
+ R = "S" + (+new Date()).toString(36),
+ S = function (a) {
+ return (a && a.type ? a.type : I) + R + (Q++).toString(36);
+ },
+ T = "http://www.w3.org/1999/xlink",
+ U = "http://www.w3.org/2000/svg",
+ V = {};
+ c.url = function (a) {
+ return "url('#" + a + "')";
+ };
+ (c._.$ = d),
+ (c._.id = S),
+ (c.format = (function () {
+ var a = /\{([^\}]+)\}/g,
+ b = /(?:(?:^|\.)(.+?)(?=\[|\.|$|\()|\[('|")(.+?)\2\])(\(\))?/g,
+ c = function (a, c, d) {
+ var e = d;
+ return (
+ c.replace(b, function (a, b, c, d, f) {
+ (b = b || d),
+ e && (b in e && (e = e[b]), "function" == typeof e && f && (e = e()));
+ }),
+ (e = (null == e || e == d ? a : e) + "")
+ );
+ };
+ return function (b, d) {
+ return A(b).replace(a, function (a, b) {
+ return c(a, b, d);
+ });
+ };
+ })()),
+ (c._.clone = f),
+ (c._.cacher = i),
+ (c.rad = k),
+ (c.deg = l),
+ (c.sin = function (a) {
+ return D.sin(c.rad(a));
+ }),
+ (c.tan = function (a) {
+ return D.tan(c.rad(a));
+ }),
+ (c.cos = function (a) {
+ return D.cos(c.rad(a));
+ }),
+ (c.asin = function (a) {
+ return c.deg(D.asin(a));
+ }),
+ (c.acos = function (a) {
+ return c.deg(D.acos(a));
+ }),
+ (c.atan = function (a) {
+ return c.deg(D.atan(a));
+ }),
+ (c.atan2 = function (a) {
+ return c.deg(D.atan2(a));
+ }),
+ (c.angle = j),
+ (c.len = function (a, b, d, e) {
+ return Math.sqrt(c.len2(a, b, d, e));
+ }),
+ (c.len2 = function (a, b, c, d) {
+ return (a - c) * (a - c) + (b - d) * (b - d);
+ }),
+ (c.closestPoint = function (a, b, c) {
+ function d(a) {
+ var d = a.x - b,
+ e = a.y - c;
+ return d * d + e * e;
+ }
+ for (
+ var e,
+ f,
+ g,
+ h,
+ i = a.node,
+ j = i.getTotalLength(),
+ k = (j / i.pathSegList.numberOfItems) * 0.125,
+ l = 1 / 0,
+ m = 0;
+ j >= m;
+ m += k
+ )
+ (h = d((g = i.getPointAtLength(m)))) < l && ((e = g), (f = m), (l = h));
+ for (k *= 0.5; k > 0.5; ) {
+ var n, o, p, q, r, s;
+ (p = f - k) >= 0 && (r = d((n = i.getPointAtLength(p)))) < l
+ ? ((e = n), (f = p), (l = r))
+ : (q = f + k) <= j && (s = d((o = i.getPointAtLength(q)))) < l
+ ? ((e = o), (f = q), (l = s))
+ : (k *= 0.5);
+ }
+ return (e = { x: e.x, y: e.y, length: f, distance: Math.sqrt(l) });
+ }),
+ (c.is = e),
+ (c.snapTo = function (a, b, c) {
+ if (((c = e(c, "finite") ? c : 10), e(a, "array"))) {
+ for (var d = a.length; d--; ) if (G(a[d] - b) <= c) return a[d];
+ } else {
+ a = +a;
+ var f = b % a;
+ if (c > f) return b - f;
+ if (f > a - c) return b - f + a;
+ }
+ return b;
+ }),
+ (c.getRGB = i(function (a) {
+ if (!a || (a = A(a)).indexOf("-") + 1)
+ return { r: -1, g: -1, b: -1, hex: "none", error: 1, toString: Z };
+ if ("none" == a) return { r: -1, g: -1, b: -1, hex: "none", toString: Z };
+ if ((!(M[z](a.toLowerCase().substring(0, 2)) || "#" == a.charAt()) && (a = W(a)), !a))
+ return { r: -1, g: -1, b: -1, hex: "none", error: 1, toString: Z };
+ var b,
+ d,
+ f,
+ g,
+ h,
+ i,
+ j = a.match(K);
+ return j
+ ? (j[2] &&
+ ((f = C(j[2].substring(5), 16)),
+ (d = C(j[2].substring(3, 5), 16)),
+ (b = C(j[2].substring(1, 3), 16))),
+ j[3] &&
+ ((f = C((h = j[3].charAt(3)) + h, 16)),
+ (d = C((h = j[3].charAt(2)) + h, 16)),
+ (b = C((h = j[3].charAt(1)) + h, 16))),
+ j[4] &&
+ ((i = j[4].split(L)),
+ (b = B(i[0])),
+ "%" == i[0].slice(-1) && (b *= 2.55),
+ (d = B(i[1])),
+ "%" == i[1].slice(-1) && (d *= 2.55),
+ (f = B(i[2])),
+ "%" == i[2].slice(-1) && (f *= 2.55),
+ "rgba" == j[1].toLowerCase().slice(0, 4) && (g = B(i[3])),
+ i[3] && "%" == i[3].slice(-1) && (g /= 100)),
+ j[5]
+ ? ((i = j[5].split(L)),
+ (b = B(i[0])),
+ "%" == i[0].slice(-1) && (b /= 100),
+ (d = B(i[1])),
+ "%" == i[1].slice(-1) && (d /= 100),
+ (f = B(i[2])),
+ "%" == i[2].slice(-1) && (f /= 100),
+ ("deg" == i[0].slice(-3) || "°" == i[0].slice(-1)) && (b /= 360),
+ "hsba" == j[1].toLowerCase().slice(0, 4) && (g = B(i[3])),
+ i[3] && "%" == i[3].slice(-1) && (g /= 100),
+ c.hsb2rgb(b, d, f, g))
+ : j[6]
+ ? ((i = j[6].split(L)),
+ (b = B(i[0])),
+ "%" == i[0].slice(-1) && (b /= 100),
+ (d = B(i[1])),
+ "%" == i[1].slice(-1) && (d /= 100),
+ (f = B(i[2])),
+ "%" == i[2].slice(-1) && (f /= 100),
+ ("deg" == i[0].slice(-3) || "°" == i[0].slice(-1)) && (b /= 360),
+ "hsla" == j[1].toLowerCase().slice(0, 4) && (g = B(i[3])),
+ i[3] && "%" == i[3].slice(-1) && (g /= 100),
+ c.hsl2rgb(b, d, f, g))
+ : ((b = F(D.round(b), 255)),
+ (d = F(D.round(d), 255)),
+ (f = F(D.round(f), 255)),
+ (g = F(E(g, 0), 1)),
+ (j = { r: b, g: d, b: f, toString: Z }),
+ (j.hex = "#" + (16777216 | f | (d << 8) | (b << 16)).toString(16).slice(1)),
+ (j.opacity = e(g, "finite") ? g : 1),
+ j))
+ : { r: -1, g: -1, b: -1, hex: "none", error: 1, toString: Z };
+ }, c)),
+ (c.hsb = i(function (a, b, d) {
+ return c.hsb2rgb(a, b, d).hex;
+ })),
+ (c.hsl = i(function (a, b, d) {
+ return c.hsl2rgb(a, b, d).hex;
+ })),
+ (c.rgb = i(function (a, b, c, d) {
+ if (e(d, "finite")) {
+ var f = D.round;
+ return "rgba(" + [f(a), f(b), f(c), +d.toFixed(2)] + ")";
+ }
+ return "#" + (16777216 | c | (b << 8) | (a << 16)).toString(16).slice(1);
+ }));
+ var W = function (a) {
+ var b = y.doc.getElementsByTagName("head")[0] || y.doc.getElementsByTagName("svg")[0],
+ c = "rgb(255, 0, 0)";
+ return (W = i(function (a) {
+ if ("red" == a.toLowerCase()) return c;
+ (b.style.color = c), (b.style.color = a);
+ var d = y.doc.defaultView.getComputedStyle(b, I).getPropertyValue("color");
+ return d == c ? null : d;
+ }))(a);
+ },
+ X = function () {
+ return "hsb(" + [this.h, this.s, this.b] + ")";
+ },
+ Y = function () {
+ return "hsl(" + [this.h, this.s, this.l] + ")";
+ },
+ Z = function () {
+ return 1 == this.opacity || null == this.opacity
+ ? this.hex
+ : "rgba(" + [this.r, this.g, this.b, this.opacity] + ")";
+ },
+ $ = function (a, b, d) {
+ if (
+ (null == b &&
+ e(a, "object") &&
+ "r" in a &&
+ "g" in a &&
+ "b" in a &&
+ ((d = a.b), (b = a.g), (a = a.r)),
+ null == b && e(a, string))
+ ) {
+ var f = c.getRGB(a);
+ (a = f.r), (b = f.g), (d = f.b);
+ }
+ return (a > 1 || b > 1 || d > 1) && ((a /= 255), (b /= 255), (d /= 255)), [a, b, d];
+ },
+ _ = function (a, b, d, f) {
+ (a = D.round(255 * a)), (b = D.round(255 * b)), (d = D.round(255 * d));
+ var g = {
+ r: a,
+ g: b,
+ b: d,
+ opacity: e(f, "finite") ? f : 1,
+ hex: c.rgb(a, b, d),
+ toString: Z,
+ };
+ return e(f, "finite") && (g.opacity = f), g;
+ };
+ (c.color = function (a) {
+ var b;
+ return (
+ e(a, "object") && "h" in a && "s" in a && "b" in a
+ ? ((b = c.hsb2rgb(a)),
+ (a.r = b.r),
+ (a.g = b.g),
+ (a.b = b.b),
+ (a.opacity = 1),
+ (a.hex = b.hex))
+ : e(a, "object") && "h" in a && "s" in a && "l" in a
+ ? ((b = c.hsl2rgb(a)),
+ (a.r = b.r),
+ (a.g = b.g),
+ (a.b = b.b),
+ (a.opacity = 1),
+ (a.hex = b.hex))
+ : (e(a, "string") && (a = c.getRGB(a)),
+ e(a, "object") && "r" in a && "g" in a && "b" in a && !("error" in a)
+ ? ((b = c.rgb2hsl(a)),
+ (a.h = b.h),
+ (a.s = b.s),
+ (a.l = b.l),
+ (b = c.rgb2hsb(a)),
+ (a.v = b.b))
+ : ((a = { hex: "none" }),
+ (a.r = a.g = a.b = a.h = a.s = a.v = a.l = -1),
+ (a.error = 1))),
+ (a.toString = Z),
+ a
+ );
+ }),
+ (c.hsb2rgb = function (a, b, c, d) {
+ e(a, "object") &&
+ "h" in a &&
+ "s" in a &&
+ "b" in a &&
+ ((c = a.b), (b = a.s), (d = a.o), (a = a.h)),
+ (a *= 360);
+ var f, g, h, i, j;
+ return (
+ (a = (a % 360) / 60),
+ (j = c * b),
+ (i = j * (1 - G((a % 2) - 1))),
+ (f = g = h = c - j),
+ (a = ~~a),
+ (f += [j, i, 0, 0, i, j][a]),
+ (g += [i, j, j, i, 0, 0][a]),
+ (h += [0, 0, i, j, j, i][a]),
+ _(f, g, h, d)
+ );
+ }),
+ (c.hsl2rgb = function (a, b, c, d) {
+ e(a, "object") && "h" in a && "s" in a && "l" in a && ((c = a.l), (b = a.s), (a = a.h)),
+ (a > 1 || b > 1 || c > 1) && ((a /= 360), (b /= 100), (c /= 100)),
+ (a *= 360);
+ var f, g, h, i, j;
+ return (
+ (a = (a % 360) / 60),
+ (j = 2 * b * (0.5 > c ? c : 1 - c)),
+ (i = j * (1 - G((a % 2) - 1))),
+ (f = g = h = c - j / 2),
+ (a = ~~a),
+ (f += [j, i, 0, 0, i, j][a]),
+ (g += [i, j, j, i, 0, 0][a]),
+ (h += [0, 0, i, j, j, i][a]),
+ _(f, g, h, d)
+ );
+ }),
+ (c.rgb2hsb = function (a, b, c) {
+ (c = $(a, b, c)), (a = c[0]), (b = c[1]), (c = c[2]);
+ var d, e, f, g;
+ return (
+ (f = E(a, b, c)),
+ (g = f - F(a, b, c)),
+ (d =
+ 0 == g ? null : f == a ? (b - c) / g : f == b ? (c - a) / g + 2 : (a - b) / g + 4),
+ (d = (((d + 360) % 6) * 60) / 360),
+ (e = 0 == g ? 0 : g / f),
+ { h: d, s: e, b: f, toString: X }
+ );
+ }),
+ (c.rgb2hsl = function (a, b, c) {
+ (c = $(a, b, c)), (a = c[0]), (b = c[1]), (c = c[2]);
+ var d, e, f, g, h, i;
+ return (
+ (g = E(a, b, c)),
+ (h = F(a, b, c)),
+ (i = g - h),
+ (d =
+ 0 == i ? null : g == a ? (b - c) / i : g == b ? (c - a) / i + 2 : (a - b) / i + 4),
+ (d = (((d + 360) % 6) * 60) / 360),
+ (f = (g + h) / 2),
+ (e = 0 == i ? 0 : 0.5 > f ? i / (2 * f) : i / (2 - 2 * f)),
+ { h: d, s: e, l: f, toString: Y }
+ );
+ }),
+ (c.parsePathString = function (a) {
+ if (!a) return null;
+ var b = c.path(a);
+ if (b.arr) return c.path.clone(b.arr);
+ var d = {
+ a: 7,
+ c: 6,
+ o: 2,
+ h: 1,
+ l: 2,
+ m: 2,
+ r: 4,
+ q: 4,
+ s: 4,
+ t: 2,
+ v: 1,
+ u: 3,
+ z: 0,
+ },
+ f = [];
+ return (
+ e(a, "array") && e(a[0], "array") && (f = c.path.clone(a)),
+ f.length ||
+ A(a).replace(N, function (a, b, c) {
+ var e = [],
+ g = b.toLowerCase();
+ if (
+ (c.replace(P, function (a, b) {
+ b && e.push(+b);
+ }),
+ "m" == g &&
+ e.length > 2 &&
+ (f.push([b].concat(e.splice(0, 2))), (g = "l"), (b = "m" == b ? "l" : "L")),
+ "o" == g && 1 == e.length && f.push([b, e[0]]),
+ "r" == g)
+ )
+ f.push([b].concat(e));
+ else for (; e.length >= d[g] && (f.push([b].concat(e.splice(0, d[g]))), d[g]); );
+ }),
+ (f.toString = c.path.toString),
+ (b.arr = c.path.clone(f)),
+ f
+ );
+ });
+ var aa = (c.parseTransformString = function (a) {
+ if (!a) return null;
+ var b = [];
+ return (
+ e(a, "array") && e(a[0], "array") && (b = c.path.clone(a)),
+ b.length ||
+ A(a).replace(O, function (a, c, d) {
+ var e = [];
+ c.toLowerCase();
+ d.replace(P, function (a, b) {
+ b && e.push(+b);
+ }),
+ b.push([c].concat(e));
+ }),
+ (b.toString = c.path.toString),
+ b
+ );
+ });
+ (c._.svgTransform2string = m),
+ (c._.rgTransform = /^[a-z][\s]*-?\.?\d/i),
+ (c._.transform2matrix = n),
+ (c._unit2px = q);
+ y.doc.contains || y.doc.compareDocumentPosition
+ ? function (a, b) {
+ var c = 9 == a.nodeType ? a.documentElement : a,
+ d = b && b.parentNode;
+ return (
+ a == d ||
+ !(
+ !d ||
+ 1 != d.nodeType ||
+ !(c.contains
+ ? c.contains(d)
+ : a.compareDocumentPosition && 16 & a.compareDocumentPosition(d))
+ )
+ );
+ }
+ : function (a, b) {
+ if (b) for (; b; ) if (((b = b.parentNode), b == a)) return !0;
+ return !1;
+ };
+ (c._.getSomeDefs = o),
+ (c._.getSomeSVG = p),
+ (c.select = function (a) {
+ return (a = A(a).replace(/([^\\]):/g, "$1\\:")), w(y.doc.querySelector(a));
+ }),
+ (c.selectAll = function (a) {
+ for (
+ var b = y.doc.querySelectorAll(a), d = (c.set || Array)(), e = 0;
+ e < b.length;
+ e++
+ )
+ d.push(w(b[e]));
+ return d;
+ }),
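+      // Every 10 seconds, drop cached wrappers whose nodes are no longer attached to a document.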
+ setInterval(function () {
+ for (var a in V)
+ if (V[z](a)) {
+ var b = V[a],
+ c = b.node;
+ (("svg" != b.type && !c.ownerSVGElement) ||
+ ("svg" == b.type &&
+ (!c.parentNode ||
+ ("ownerSVGElement" in c.parentNode && !c.ownerSVGElement)))) &&
+ delete V[a];
+ }
+ }, 1e4),
+ (s.prototype.attr = function (a, c) {
+ var d = this,
+ f = d.node;
+ if (!a) {
+ if (1 != f.nodeType) return { text: f.nodeValue };
+ for (var g = f.attributes, h = {}, i = 0, j = g.length; j > i; i++)
+ h[g[i].nodeName] = g[i].nodeValue;
+ return h;
+ }
+ if (e(a, "string")) {
+ if (!(arguments.length > 1)) return b("snap.util.getattr." + a, d).firstDefined();
+ var k = {};
+ (k[a] = c), (a = k);
+ }
+ for (var l in a) a[z](l) && b("snap.util.attr." + l, d, a[l]);
+ return d;
+ }),
+ (c.parse = function (a) {
+ var b = y.doc.createDocumentFragment(),
+ c = !0,
+ d = y.doc.createElement("div");
+ if (
+ ((a = A(a)),
+          a.match(/^\s*<\s*svg(?:\s|>)/) || ((a = "<svg>" + a + "</svg>"), (c = !1)),
+ (d.innerHTML = a),
+ (a = d.getElementsByTagName("svg")[0]))
+ )
+ if (c) b = a;
+ else for (; a.firstChild; ) b.appendChild(a.firstChild);
+ return new t(b);
+ }),
+ (c.fragment = function () {
+ for (
+ var a = Array.prototype.slice.call(arguments, 0),
+ b = y.doc.createDocumentFragment(),
+ d = 0,
+ e = a.length;
+ e > d;
+ d++
+ ) {
+ var f = a[d];
+ f.node && f.node.nodeType && b.appendChild(f.node),
+ f.nodeType && b.appendChild(f),
+ "string" == typeof f && b.appendChild(c.parse(f).node);
+ }
+ return new t(b);
+ }),
+ (c._.make = u),
+ (c._.wrap = w),
+ (v.prototype.el = function (a, b) {
+ var c = u(a, this.node);
+ return b && c.attr(b), c;
+ }),
+ (s.prototype.children = function () {
+ for (var a = [], b = this.node.childNodes, d = 0, e = b.length; e > d; d++)
+ a[d] = c(b[d]);
+ return a;
+ }),
+ (s.prototype.toJSON = function () {
+ var a = [];
+ return x([this], a), a[0];
+ }),
+ b.on("snap.util.getattr", function () {
+ var a = b.nt();
+ a = a.substring(a.lastIndexOf(".") + 1);
+ var c = a.replace(/[A-Z]/g, function (a) {
+ return "-" + a.toLowerCase();
+ });
+ return ba[z](c)
+ ? this.node.ownerDocument.defaultView
+ .getComputedStyle(this.node, null)
+ .getPropertyValue(c)
+ : d(this.node, a);
+ });
+ var ba = {
+ "alignment-baseline": 0,
+ "baseline-shift": 0,
+ clip: 0,
+ "clip-path": 0,
+ "clip-rule": 0,
+ color: 0,
+ "color-interpolation": 0,
+ "color-interpolation-filters": 0,
+ "color-profile": 0,
+ "color-rendering": 0,
+ cursor: 0,
+ direction: 0,
+ display: 0,
+ "dominant-baseline": 0,
+ "enable-background": 0,
+ fill: 0,
+ "fill-opacity": 0,
+ "fill-rule": 0,
+ filter: 0,
+ "flood-color": 0,
+ "flood-opacity": 0,
+ font: 0,
+ "font-family": 0,
+ "font-size": 0,
+ "font-size-adjust": 0,
+ "font-stretch": 0,
+ "font-style": 0,
+ "font-variant": 0,
+ "font-weight": 0,
+ "glyph-orientation-horizontal": 0,
+ "glyph-orientation-vertical": 0,
+ "image-rendering": 0,
+ kerning: 0,
+ "letter-spacing": 0,
+ "lighting-color": 0,
+ marker: 0,
+ "marker-end": 0,
+ "marker-mid": 0,
+ "marker-start": 0,
+ mask: 0,
+ opacity: 0,
+ overflow: 0,
+ "pointer-events": 0,
+ "shape-rendering": 0,
+ "stop-color": 0,
+ "stop-opacity": 0,
+ stroke: 0,
+ "stroke-dasharray": 0,
+ "stroke-dashoffset": 0,
+ "stroke-linecap": 0,
+ "stroke-linejoin": 0,
+ "stroke-miterlimit": 0,
+ "stroke-opacity": 0,
+ "stroke-width": 0,
+ "text-anchor": 0,
+ "text-decoration": 0,
+ "text-rendering": 0,
+ "unicode-bidi": 0,
+ visibility: 0,
+ "word-spacing": 0,
+ "writing-mode": 0,
+ };
+ b.on("snap.util.attr", function (a) {
+ var c = b.nt(),
+ e = {};
+ (c = c.substring(c.lastIndexOf(".") + 1)), (e[c] = a);
+ var f = c.replace(/-(\w)/gi, function (a, b) {
+ return b.toUpperCase();
+ }),
+ g = c.replace(/[A-Z]/g, function (a) {
+ return "-" + a.toLowerCase();
+ });
+ ba[z](g) ? (this.node.style[f] = null == a ? I : a) : d(this.node, e);
+ }),
+ (function (a) {})(v.prototype),
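+      // Minimal XHR helpers: Snap.ajax fires "snap.ajax.{id}.{status}" events; Snap.load parses the response into a fragment.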
+ (c.ajax = function (a, c, d, f) {
+ var g = new XMLHttpRequest(),
+ h = S();
+ if (g) {
+ if (e(c, "function")) (f = d), (d = c), (c = null);
+ else if (e(c, "object")) {
+ var i = [];
+ for (var j in c)
+ c.hasOwnProperty(j) &&
+ i.push(encodeURIComponent(j) + "=" + encodeURIComponent(c[j]));
+ c = i.join("&");
+ }
+ return (
+ g.open(c ? "POST" : "GET", a, !0),
+ c &&
+ (g.setRequestHeader("X-Requested-With", "XMLHttpRequest"),
+ g.setRequestHeader("Content-type", "application/x-www-form-urlencoded")),
+ d &&
+ (b.once("snap.ajax." + h + ".0", d),
+ b.once("snap.ajax." + h + ".200", d),
+ b.once("snap.ajax." + h + ".304", d)),
+ (g.onreadystatechange = function () {
+ 4 == g.readyState && b("snap.ajax." + h + "." + g.status, f, g);
+ }),
+ 4 == g.readyState ? g : (g.send(c), g)
+ );
+ }
+ }),
+ (c.load = function (a, b, d) {
+ c.ajax(a, function (a) {
+ var e = c.parse(a.responseText);
+ d ? b.call(d, e) : b(e);
+ });
+ });
+ var ca = function (a) {
+ var b = a.getBoundingClientRect(),
+ c = a.ownerDocument,
+ d = c.body,
+ e = c.documentElement,
+ f = e.clientTop || d.clientTop || 0,
+ h = e.clientLeft || d.clientLeft || 0,
+ i = b.top + (g.win.pageYOffset || e.scrollTop || d.scrollTop) - f,
+ j = b.left + (g.win.pageXOffset || e.scrollLeft || d.scrollLeft) - h;
+ return { y: i, x: j };
+ };
+ return (
+ (c.getElementByPoint = function (a, b) {
+ var c = this,
+ d = (c.canvas, y.doc.elementFromPoint(a, b));
+ if (y.win.opera && "svg" == d.tagName) {
+ var e = ca(d),
+ f = d.createSVGRect();
+ (f.x = a - e.x), (f.y = b - e.y), (f.width = f.height = 1);
+ var g = d.getIntersectionList(f, null);
+ g.length && (d = g[g.length - 1]);
+ }
+ return d ? w(d) : null;
+ }),
+ (c.plugin = function (a) {
+ a(c, s, v, y, t);
+ }),
+ (y.win.Snap = c),
+ c
+ );
+ })(a || this);
+ return (
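+    // Plugin: Element prototype methods (getBBox, transform, append/prepend/before/after, use, clone, toDefs, pattern, marker, data, innerSVG/outerSVG, toDataURL).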
+ d.plugin(function (c, d, e, f, g) {
+ function h(a, b) {
+ if (null == b) {
+ var d = !0;
+ if (
+ ((b =
+ "linearGradient" == a.type || "radialGradient" == a.type
+ ? a.node.getAttribute("gradientTransform")
+ : "pattern" == a.type
+ ? a.node.getAttribute("patternTransform")
+ : a.node.getAttribute("transform")),
+ !b)
+ )
+ return new c.Matrix();
+ b = c._.svgTransform2string(b);
+ } else (b = c._.rgTransform.test(b) ? m(b).replace(/\.{3}|\u2026/g, a._.transform || "") : c._.svgTransform2string(b)), l(b, "array") && (b = c.path ? c.path.toString.call(b) : m(b)), (a._.transform = b);
+ var e = c._.transform2matrix(b, a.getBBox(1));
+ return d ? e : void (a.matrix = e);
+ }
+ function i(a) {
+ function b(a, b) {
+ var d = o(a.node, b);
+ (d = d && d.match(g)),
+ (d = d && d[2]),
+ d &&
+ "#" == d.charAt() &&
+ ((d = d.substring(1)),
+ d &&
+ (i[d] = (i[d] || []).concat(function (d) {
+ var e = {};
+ (e[b] = c.url(d)), o(a.node, e);
+ })));
+ }
+ function d(a) {
+ var b = o(a.node, "xlink:href");
+ b &&
+ "#" == b.charAt() &&
+ ((b = b.substring(1)),
+ b &&
+ (i[b] = (i[b] || []).concat(function (b) {
+ a.attr("xlink:href", "#" + b);
+ })));
+ }
+ for (
+ var e,
+ f = a.selectAll("*"),
+ g = /^\s*url\(("|'|)(.*)\1\)\s*$/,
+ h = [],
+ i = {},
+ j = 0,
+ k = f.length;
+ k > j;
+ j++
+ ) {
+ (e = f[j]),
+ b(e, "fill"),
+ b(e, "stroke"),
+ b(e, "filter"),
+ b(e, "mask"),
+ b(e, "clip-path"),
+ d(e);
+ var l = o(e.node, "id");
+ l && (o(e.node, { id: e.id }), h.push({ old: l, id: e.id }));
+ }
+ for (j = 0, k = h.length; k > j; j++) {
+ var m = i[h[j].old];
+ if (m) for (var n = 0, p = m.length; p > n; n++) m[n](h[j].id);
+ }
+ }
+ function j(a) {
+ return function () {
+ var b = a ? "<" + this.type : "",
+ c = this.node.attributes,
+ d = this.node.childNodes;
+ if (a)
+ for (var e = 0, f = c.length; f > e; e++)
+ b += " " + c[e].name + '="' + c[e].value.replace(/"/g, '\\"') + '"';
+ if (d.length) {
+ for (a && (b += ">"), e = 0, f = d.length; f > e; e++)
+ 3 == d[e].nodeType
+ ? (b += d[e].nodeValue)
+ : 1 == d[e].nodeType && (b += s(d[e]).toString());
+          a && (b += "</" + this.type + ">");
+ } else a && (b += "/>");
+ return b;
+ };
+ }
+ var k = d.prototype,
+ l = c.is,
+ m = String,
+ n = c._unit2px,
+ o = c._.$,
+ p = c._.make,
+ q = c._.getSomeDefs,
+ r = "hasOwnProperty",
+ s = c._.wrap;
+ k.getBBox = function (a) {
+ if ("tspan" == this.type) return c._.box(this.node.getClientRects().item(0));
+ if (!c.Matrix || !c.path) return this.node.getBBox();
+ var b = this,
+ d = new c.Matrix();
+ if (b.removed) return c._.box();
+ for (; "use" == b.type; )
+ if (
+ (a ||
+ (d = d.add(
+ b.transform().localMatrix.translate(b.attr("x") || 0, b.attr("y") || 0)
+ )),
+ b.original)
+ )
+ b = b.original;
+ else {
+ var e = b.attr("xlink:href");
+ b = b.original = b.node.ownerDocument.getElementById(e.substring(e.indexOf("#") + 1));
+ }
+ var f = b._,
+ g = c.path.get[b.type] || c.path.get.deflt;
+ try {
+ return a
+ ? ((f.bboxwt = g ? c.path.getBBox((b.realPath = g(b))) : c._.box(b.node.getBBox())),
+ c._.box(f.bboxwt))
+ : ((b.realPath = g(b)),
+ (b.matrix = b.transform().localMatrix),
+ (f.bbox = c.path.getBBox(c.path.map(b.realPath, d.add(b.matrix)))),
+ c._.box(f.bbox));
+ } catch (h) {
+ return c._.box();
+ }
+ };
+ var t = function () {
+ return this.string;
+ };
+ (k.transform = function (a) {
+ var b = this._;
+ if (null == a) {
+ for (
+ var d,
+ e = this,
+ f = new c.Matrix(this.node.getCTM()),
+ g = h(this),
+ i = [g],
+ j = new c.Matrix(),
+ k = g.toTransformString(),
+ l = m(g) == m(this.matrix) ? m(b.transform) : k;
+ "svg" != e.type && (e = e.parent());
+
+ )
+ i.push(h(e));
+ for (d = i.length; d--; ) j.add(i[d]);
+ return {
+ string: l,
+ globalMatrix: f,
+ totalMatrix: j,
+ localMatrix: g,
+ diffMatrix: f.clone().add(g.invert()),
+ global: f.toTransformString(),
+ total: j.toTransformString(),
+ local: k,
+ toString: t,
+ };
+ }
+ return (
+ a instanceof c.Matrix
+ ? ((this.matrix = a), (this._.transform = a.toTransformString()))
+ : h(this, a),
+ this.node &&
+ ("linearGradient" == this.type || "radialGradient" == this.type
+ ? o(this.node, { gradientTransform: this.matrix })
+ : "pattern" == this.type
+ ? o(this.node, { patternTransform: this.matrix })
+ : o(this.node, { transform: this.matrix })),
+ this
+ );
+ }),
+ (k.parent = function () {
+ return s(this.node.parentNode);
+ }),
+ (k.append = k.add =
+ function (a) {
+ if (a) {
+ if ("set" == a.type) {
+ var b = this;
+ return (
+ a.forEach(function (a) {
+ b.add(a);
+ }),
+ this
+ );
+ }
+ (a = s(a)), this.node.appendChild(a.node), (a.paper = this.paper);
+ }
+ return this;
+ }),
+ (k.appendTo = function (a) {
+ return a && ((a = s(a)), a.append(this)), this;
+ }),
+ (k.prepend = function (a) {
+ if (a) {
+ if ("set" == a.type) {
+ var b,
+ c = this;
+ return (
+ a.forEach(function (a) {
+ b ? b.after(a) : c.prepend(a), (b = a);
+ }),
+ this
+ );
+ }
+ a = s(a);
+ var d = a.parent();
+ this.node.insertBefore(a.node, this.node.firstChild),
+ this.add && this.add(),
+ (a.paper = this.paper),
+ this.parent() && this.parent().add(),
+ d && d.add();
+ }
+ return this;
+ }),
+ (k.prependTo = function (a) {
+ return (a = s(a)), a.prepend(this), this;
+ }),
+ (k.before = function (a) {
+ if ("set" == a.type) {
+ var b = this;
+ return (
+ a.forEach(function (a) {
+ var c = a.parent();
+ b.node.parentNode.insertBefore(a.node, b.node), c && c.add();
+ }),
+ this.parent().add(),
+ this
+ );
+ }
+ a = s(a);
+ var c = a.parent();
+ return (
+ this.node.parentNode.insertBefore(a.node, this.node),
+ this.parent() && this.parent().add(),
+ c && c.add(),
+ (a.paper = this.paper),
+ this
+ );
+ }),
+ (k.after = function (a) {
+ a = s(a);
+ var b = a.parent();
+ return (
+ this.node.nextSibling
+ ? this.node.parentNode.insertBefore(a.node, this.node.nextSibling)
+ : this.node.parentNode.appendChild(a.node),
+ this.parent() && this.parent().add(),
+ b && b.add(),
+ (a.paper = this.paper),
+ this
+ );
+ }),
+ (k.insertBefore = function (a) {
+ a = s(a);
+ var b = this.parent();
+ return (
+ a.node.parentNode.insertBefore(this.node, a.node),
+ (this.paper = a.paper),
+ b && b.add(),
+ a.parent() && a.parent().add(),
+ this
+ );
+ }),
+ (k.insertAfter = function (a) {
+ a = s(a);
+ var b = this.parent();
+ return (
+ a.node.parentNode.insertBefore(this.node, a.node.nextSibling),
+ (this.paper = a.paper),
+ b && b.add(),
+ a.parent() && a.parent().add(),
+ this
+ );
+ }),
+ (k.remove = function () {
+ var a = this.parent();
+ return (
+ this.node.parentNode && this.node.parentNode.removeChild(this.node),
+ delete this.paper,
+ (this.removed = !0),
+ a && a.add(),
+ this
+ );
+ }),
+ (k.select = function (a) {
+ return s(this.node.querySelector(a));
+ }),
+ (k.selectAll = function (a) {
+ for (
+ var b = this.node.querySelectorAll(a), d = (c.set || Array)(), e = 0;
+ e < b.length;
+ e++
+ )
+ d.push(s(b[e]));
+ return d;
+ }),
+ (k.asPX = function (a, b) {
+ return null == b && (b = this.attr(a)), +n(this, a, b);
+ }),
+ (k.use = function () {
+ var a,
+ b = this.node.id;
+ return (
+ b || ((b = this.id), o(this.node, { id: b })),
+ (a =
+ "linearGradient" == this.type ||
+ "radialGradient" == this.type ||
+ "pattern" == this.type
+ ? p(this.type, this.node.parentNode)
+ : p("use", this.node.parentNode)),
+ o(a.node, { "xlink:href": "#" + b }),
+ (a.original = this),
+ a
+ );
+ }),
+ (k.clone = function () {
+ var a = s(this.node.cloneNode(!0));
+ return o(a.node, "id") && o(a.node, { id: a.id }), i(a), a.insertAfter(this), a;
+ }),
+ (k.toDefs = function () {
+ var a = q(this);
+ return a.appendChild(this.node), this;
+ }),
+ (k.pattern = k.toPattern =
+ function (a, b, c, d) {
+ var e = p("pattern", q(this));
+ return (
+ null == a && (a = this.getBBox()),
+ l(a, "object") && "x" in a && ((b = a.y), (c = a.width), (d = a.height), (a = a.x)),
+ o(e.node, {
+ x: a,
+ y: b,
+ width: c,
+ height: d,
+ patternUnits: "userSpaceOnUse",
+ id: e.id,
+ viewBox: [a, b, c, d].join(" "),
+ }),
+ e.node.appendChild(this.node),
+ e
+ );
+ }),
+ (k.marker = function (a, b, c, d, e, f) {
+ var g = p("marker", q(this));
+ return (
+ null == a && (a = this.getBBox()),
+ l(a, "object") &&
+ "x" in a &&
+ ((b = a.y),
+ (c = a.width),
+ (d = a.height),
+ (e = a.refX || a.cx),
+ (f = a.refY || a.cy),
+ (a = a.x)),
+ o(g.node, {
+ viewBox: [a, b, c, d].join(" "),
+ markerWidth: c,
+ markerHeight: d,
+ orient: "auto",
+ refX: e || 0,
+ refY: f || 0,
+ id: g.id,
+ }),
+ g.node.appendChild(this.node),
+ g
+ );
+ });
+ var u = {};
+ (k.data = function (a, d) {
+ var e = (u[this.id] = u[this.id] || {});
+ if (0 == arguments.length) return b("snap.data.get." + this.id, this, e, null), e;
+ if (1 == arguments.length) {
+ if (c.is(a, "object")) {
+ for (var f in a) a[r](f) && this.data(f, a[f]);
+ return this;
+ }
+ return b("snap.data.get." + this.id, this, e[a], a), e[a];
+ }
+ return (e[a] = d), b("snap.data.set." + this.id, this, d, a), this;
+ }),
+ (k.removeData = function (a) {
+ return null == a ? (u[this.id] = {}) : u[this.id] && delete u[this.id][a], this;
+ }),
+ (k.outerSVG = k.toString = j(1)),
+ (k.innerSVG = j()),
+ (k.toDataURL = function () {
+ if (a && a.btoa) {
+ var b = this.getBBox(),
+ d = c.format(
+              '<svg version="1.1" baseProfile="full" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events" x="{x}px" y="{y}px" width="{width}px" height="{height}px" viewBox="{x} {y} {width} {height}">{contents}</svg>',
+ {
+ x: +b.x.toFixed(3),
+ y: +b.y.toFixed(3),
+ width: +b.width.toFixed(3),
+ height: +b.height.toFixed(3),
+ contents: this.outerSVG(),
+ }
+ );
+ return "data:image/svg+xml;base64," + btoa(unescape(encodeURIComponent(d)));
+ }
+ }),
+ (g.prototype.select = k.select),
+ (g.prototype.selectAll = k.selectAll);
+ }),
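+    // Plugin: animation support built on mina (Snap.animation, Snap.animate, Element.animate, inAnim, stop).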
+ d.plugin(function (a, d, e, f, g) {
+ function h(a, b, c) {
+ return function (d) {
+ var e = d.slice(a, b);
+ return 1 == e.length && (e = e[0]), c ? c(e) : e;
+ };
+ }
+ var i = d.prototype,
+ j = a.is,
+ k = String,
+ l = "hasOwnProperty",
+ m = function (a, b, d, e) {
+ "function" != typeof d || d.length || ((e = d), (d = c.linear)),
+ (this.attr = a),
+ (this.dur = b),
+ d && (this.easing = d),
+ e && (this.callback = e);
+ };
+ (a._.Animation = m),
+ (a.animation = function (a, b, c, d) {
+ return new m(a, b, c, d);
+ }),
+ (i.inAnim = function () {
+ var a = this,
+ b = [];
+ for (var c in a.anims)
+ a.anims[l](c) &&
+ !(function (a) {
+ b.push({
+ anim: new m(a._attrs, a.dur, a.easing, a._callback),
+ mina: a,
+ curStatus: a.status(),
+ status: function (b) {
+ return a.status(b);
+ },
+ stop: function () {
+ a.stop();
+ },
+ });
+ })(a.anims[c]);
+ return b;
+ }),
+ (a.animate = function (a, d, e, f, g, h) {
+ "function" != typeof g || g.length || ((h = g), (g = c.linear));
+ var i = c.time(),
+ j = c(a, d, i, i + f, c.time, e, g);
+ return h && b.once("mina.finish." + j.id, h), j;
+ }),
+ (i.stop = function () {
+ for (var a = this.inAnim(), b = 0, c = a.length; c > b; b++) a[b].stop();
+ return this;
+ }),
+ (i.animate = function (a, d, e, f) {
+ "function" != typeof e || e.length || ((f = e), (e = c.linear)),
+ a instanceof m && ((f = a.callback), (e = a.easing), (d = a.dur), (a = a.attr));
+ var g,
+ i,
+ n,
+ o,
+ p = [],
+ q = [],
+ r = {},
+ s = this;
+ for (var t in a)
+ if (a[l](t)) {
+ s.equal
+ ? ((o = s.equal(t, k(a[t]))), (g = o.from), (i = o.to), (n = o.f))
+ : ((g = +s.attr(t)), (i = +a[t]));
+ var u = j(g, "array") ? g.length : 1;
+ (r[t] = h(p.length, p.length + u, n)), (p = p.concat(g)), (q = q.concat(i));
+ }
+ var v = c.time(),
+ w = c(
+ p,
+ q,
+ v,
+ v + d,
+ c.time,
+ function (a) {
+ var b = {};
+ for (var c in r) r[l](c) && (b[c] = r[c](a));
+ s.attr(b);
+ },
+ e
+ );
+ return (
+ (s.anims[w.id] = w),
+ (w._attrs = a),
+ (w._callback = f),
+ b("snap.animcreated." + s.id, w),
+ b.once("mina.finish." + w.id, function () {
+ b.off("mina.*." + w.id), delete s.anims[w.id], f && f.call(s);
+ }),
+ b.once("mina.stop." + w.id, function () {
+ b.off("mina.*." + w.id), delete s.anims[w.id];
+ }),
+ s
+ );
+ });
+ }),
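+    // Plugin: Snap.Matrix, a 2D affine transform class (add, multLeft, invert, translate, scale, rotate, skew, split, toTransformString).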
+ d.plugin(function (a, b, c, d, e) {
+ function f(a, b, c, d, e, f) {
+ return null == b && "[object SVGMatrix]" == g.call(a)
+ ? ((this.a = a.a),
+ (this.b = a.b),
+ (this.c = a.c),
+ (this.d = a.d),
+ (this.e = a.e),
+ void (this.f = a.f))
+ : void (null != a
+ ? ((this.a = +a),
+ (this.b = +b),
+ (this.c = +c),
+ (this.d = +d),
+ (this.e = +e),
+ (this.f = +f))
+ : ((this.a = 1),
+ (this.b = 0),
+ (this.c = 0),
+ (this.d = 1),
+ (this.e = 0),
+ (this.f = 0)));
+ }
+ var g = Object.prototype.toString,
+ h = String,
+ i = Math,
+ j = "";
+ !(function (b) {
+ function c(a) {
+ return a[0] * a[0] + a[1] * a[1];
+ }
+ function d(a) {
+ var b = i.sqrt(c(a));
+ a[0] && (a[0] /= b), a[1] && (a[1] /= b);
+ }
+ (b.add = function (a, b, c, d, e, g) {
+ if (a && a instanceof f) return this.add(a.a, a.b, a.c, a.d, a.e, a.f);
+ var h = a * this.a + b * this.c,
+ i = a * this.b + b * this.d;
+ return (
+ (this.e += e * this.a + g * this.c),
+ (this.f += e * this.b + g * this.d),
+ (this.c = c * this.a + d * this.c),
+ (this.d = c * this.b + d * this.d),
+ (this.a = h),
+ (this.b = i),
+ this
+ );
+ }),
+ (f.prototype.multLeft = function (a, b, c, d, e, g) {
+ if (a && a instanceof f) return this.multLeft(a.a, a.b, a.c, a.d, a.e, a.f);
+ var h = a * this.a + c * this.b,
+ i = a * this.c + c * this.d,
+ j = a * this.e + c * this.f + e;
+ return (
+ (this.b = b * this.a + d * this.b),
+ (this.d = b * this.c + d * this.d),
+ (this.f = b * this.e + d * this.f + g),
+ (this.a = h),
+ (this.c = i),
+ (this.e = j),
+ this
+ );
+ }),
+ (b.invert = function () {
+ var a = this,
+ b = a.a * a.d - a.b * a.c;
+ return new f(
+ a.d / b,
+ -a.b / b,
+ -a.c / b,
+ a.a / b,
+ (a.c * a.f - a.d * a.e) / b,
+ (a.b * a.e - a.a * a.f) / b
+ );
+ }),
+ (b.clone = function () {
+ return new f(this.a, this.b, this.c, this.d, this.e, this.f);
+ }),
+ (b.translate = function (a, b) {
+ return (this.e += a * this.a + b * this.c), (this.f += a * this.b + b * this.d), this;
+ }),
+ (b.scale = function (a, b, c, d) {
+ return (
+ null == b && (b = a),
+ (c || d) && this.translate(c, d),
+ (this.a *= a),
+ (this.b *= a),
+ (this.c *= b),
+ (this.d *= b),
+ (c || d) && this.translate(-c, -d),
+ this
+ );
+ }),
+ (b.rotate = function (b, c, d) {
+ (b = a.rad(b)), (c = c || 0), (d = d || 0);
+ var e = +i.cos(b).toFixed(9),
+ f = +i.sin(b).toFixed(9);
+ return this.add(e, f, -f, e, c, d), this.add(1, 0, 0, 1, -c, -d);
+ }),
+ (b.skewX = function (a) {
+ return this.skew(a, 0);
+ }),
+ (b.skewY = function (a) {
+ return this.skew(0, a);
+ }),
+ (b.skew = function (b, c) {
+ (b = b || 0), (c = c || 0), (b = a.rad(b)), (c = a.rad(c));
+ var d = i.tan(b).toFixed(9),
+ e = i.tan(c).toFixed(9);
+ return this.add(1, e, d, 1, 0, 0);
+ }),
+ (b.x = function (a, b) {
+ return a * this.a + b * this.c + this.e;
+ }),
+ (b.y = function (a, b) {
+ return a * this.b + b * this.d + this.f;
+ }),
+ (b.get = function (a) {
+ return +this[h.fromCharCode(97 + a)].toFixed(4);
+ }),
+ (b.toString = function () {
+ return (
+ "matrix(" +
+ [
+ this.get(0),
+ this.get(1),
+ this.get(2),
+ this.get(3),
+ this.get(4),
+ this.get(5),
+ ].join() +
+ ")"
+ );
+ }),
+ (b.offset = function () {
+ return [this.e.toFixed(4), this.f.toFixed(4)];
+ }),
+ (b.determinant = function () {
+ return this.a * this.d - this.b * this.c;
+ }),
+ (b.split = function () {
+ var b = {};
+ (b.dx = this.e), (b.dy = this.f);
+ var e = [
+ [this.a, this.b],
+ [this.c, this.d],
+ ];
+ (b.scalex = i.sqrt(c(e[0]))),
+ d(e[0]),
+ (b.shear = e[0][0] * e[1][0] + e[0][1] * e[1][1]),
+ (e[1] = [e[1][0] - e[0][0] * b.shear, e[1][1] - e[0][1] * b.shear]),
+ (b.scaley = i.sqrt(c(e[1]))),
+ d(e[1]),
+ (b.shear /= b.scaley),
+ this.determinant() < 0 && (b.scalex = -b.scalex);
+ var f = e[0][1],
+ g = e[1][1];
+ return (
+ 0 > g
+ ? ((b.rotate = a.deg(i.acos(g))), 0 > f && (b.rotate = 360 - b.rotate))
+ : (b.rotate = a.deg(i.asin(f))),
+ (b.isSimple = !(
+ +b.shear.toFixed(9) ||
+ (b.scalex.toFixed(9) != b.scaley.toFixed(9) && b.rotate)
+ )),
+ (b.isSuperSimple =
+ !+b.shear.toFixed(9) && b.scalex.toFixed(9) == b.scaley.toFixed(9) && !b.rotate),
+ (b.noRotation = !+b.shear.toFixed(9) && !b.rotate),
+ b
+ );
+ }),
+ (b.toTransformString = function (a) {
+ var b = a || this.split();
+ return +b.shear.toFixed(9)
+ ? "m" +
+ [this.get(0), this.get(1), this.get(2), this.get(3), this.get(4), this.get(5)]
+ : ((b.scalex = +b.scalex.toFixed(4)),
+ (b.scaley = +b.scaley.toFixed(4)),
+ (b.rotate = +b.rotate.toFixed(4)),
+ (b.dx || b.dy ? "t" + [+b.dx.toFixed(4), +b.dy.toFixed(4)] : j) +
+ (b.rotate ? "r" + [+b.rotate.toFixed(4), 0, 0] : j) +
+ (1 != b.scalex || 1 != b.scaley ? "s" + [b.scalex, b.scaley, 0, 0] : j));
+ });
+ })(f.prototype),
+ (a.Matrix = f),
+ (a.matrix = function (a, b, c, d, e, g) {
+ return new f(a, b, c, d, e, g);
+ });
+ }),
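+    // Plugin: attribute handlers registered on eve ("snap.util.attr.*" and "snap.util.getattr.*") for fill, stroke, gradients, mask, clip-path, text, markers, viewBox, and class.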
+ d.plugin(function (a, c, d, e, f) {
+ function g(d) {
+ return function (e) {
+ if (
+ (b.stop(),
+ e instanceof f &&
+ 1 == e.node.childNodes.length &&
+ ("radialGradient" == e.node.firstChild.tagName ||
+ "linearGradient" == e.node.firstChild.tagName ||
+ "pattern" == e.node.firstChild.tagName) &&
+ ((e = e.node.firstChild), n(this).appendChild(e), (e = l(e))),
+ e instanceof c)
+ )
+ if ("radialGradient" == e.type || "linearGradient" == e.type || "pattern" == e.type) {
+ e.node.id || p(e.node, { id: e.id });
+ var g = q(e.node.id);
+ } else g = e.attr(d);
+ else if (((g = a.color(e)), g.error)) {
+ var h = a(n(this).ownerSVGElement).gradient(e);
+ h ? (h.node.id || p(h.node, { id: h.id }), (g = q(h.node.id))) : (g = e);
+ } else g = r(g);
+ var i = {};
+ (i[d] = g), p(this.node, i), (this.node.style[d] = t);
+ };
+ }
+ function h(a) {
+ b.stop(), a == +a && (a += "px"), (this.node.style.fontSize = a);
+ }
+ function i(a) {
+ for (var b = [], c = a.childNodes, d = 0, e = c.length; e > d; d++) {
+ var f = c[d];
+ 3 == f.nodeType && b.push(f.nodeValue),
+ "tspan" == f.tagName &&
+ (1 == f.childNodes.length && 3 == f.firstChild.nodeType
+ ? b.push(f.firstChild.nodeValue)
+ : b.push(i(f)));
+ }
+ return b;
+ }
+ function j() {
+ return b.stop(), this.node.style.fontSize;
+ }
+ var k = a._.make,
+ l = a._.wrap,
+ m = a.is,
+ n = a._.getSomeDefs,
+ o = /^url\((['"]?)([^)]+)\1\)$/,
+ p = a._.$,
+ q = a.url,
+ r = String,
+ s = a._.separator,
+ t = "";
+ (a.deurl = function (a) {
+ var b = String(a).match(o);
+ return b ? b[2] : a;
+ }),
+ b.on("snap.util.attr.mask", function (a) {
+ if (a instanceof c || a instanceof f) {
+ if (
+ (b.stop(),
+ a instanceof f &&
+ 1 == a.node.childNodes.length &&
+ ((a = a.node.firstChild), n(this).appendChild(a), (a = l(a))),
+ "mask" == a.type)
+ )
+ var d = a;
+ else (d = k("mask", n(this))), d.node.appendChild(a.node);
+ !d.node.id && p(d.node, { id: d.id }), p(this.node, { mask: q(d.id) });
+ }
+ }),
+ (function (a) {
+ b.on("snap.util.attr.clip", a),
+ b.on("snap.util.attr.clip-path", a),
+ b.on("snap.util.attr.clipPath", a);
+ })(function (a) {
+ if (a instanceof c || a instanceof f) {
+ b.stop();
+ for (var d, e = a.node; e; ) {
+ if ("clipPath" === e.nodeName) {
+ d = new c(e);
+ break;
+ }
+ if ("svg" === e.nodeName) {
+ d = void 0;
+ break;
+ }
+ e = e.parentNode;
+ }
+ d ||
+ ((d = k("clipPath", n(this))),
+ d.node.appendChild(a.node),
+ !d.node.id && p(d.node, { id: d.id })),
+ p(this.node, { "clip-path": q(d.node.id || d.id) });
+ }
+ }),
+ b.on("snap.util.attr.fill", g("fill")),
+ b.on("snap.util.attr.stroke", g("stroke"));
+ var u = /^([lr])(?:\(([^)]*)\))?(.*)$/i;
+ b.on("snap.util.grad.parse", function (a) {
+ function b(a, b) {
+ for (var c = (b - h) / (a - i), d = i; a > d; d++)
+ f[d].offset = +(+h + c * (d - i)).toFixed(2);
+ (i = a), (h = b);
+ }
+ a = r(a);
+ var c = a.match(u);
+ if (!c) return null;
+ var d = c[1],
+ e = c[2],
+ f = c[3];
+ (e = e.split(/\s*,\s*/).map(function (a) {
+ return +a == a ? +a : a;
+ })),
+ 1 == e.length && 0 == e[0] && (e = []),
+ (f = f.split("-")),
+ (f = f.map(function (a) {
+ a = a.split(":");
+ var b = { color: a[0] };
+ return a[1] && (b.offset = parseFloat(a[1])), b;
+ }));
+ var g = f.length,
+ h = 0,
+ i = 0;
+ g--;
+ for (var j = 0; g > j; j++) "offset" in f[j] && b(j, f[j].offset);
+ return (
+ (f[g].offset = f[g].offset || 100), b(g, f[g].offset), { type: d, params: e, stops: f }
+ );
+ }),
+ b.on("snap.util.attr.d", function (c) {
+ b.stop(),
+ m(c, "array") && m(c[0], "array") && (c = a.path.toString.call(c)),
+ (c = r(c)),
+ c.match(/[ruo]/i) && (c = a.path.toAbsolute(c)),
+ p(this.node, { d: c });
+ })(-1),
+ b.on("snap.util.attr.#text", function (a) {
+ b.stop(), (a = r(a));
+ for (var c = e.doc.createTextNode(a); this.node.firstChild; )
+ this.node.removeChild(this.node.firstChild);
+ this.node.appendChild(c);
+ })(-1),
+ b.on("snap.util.attr.path", function (a) {
+ b.stop(), this.attr({ d: a });
+ })(-1),
+ b.on("snap.util.attr.class", function (a) {
+ b.stop(), (this.node.className.baseVal = a);
+ })(-1),
+ b.on("snap.util.attr.viewBox", function (a) {
+ var c;
+ (c =
+ m(a, "object") && "x" in a
+ ? [a.x, a.y, a.width, a.height].join(" ")
+ : m(a, "array")
+ ? a.join(" ")
+ : a),
+ p(this.node, { viewBox: c }),
+ b.stop();
+ })(-1),
+ b.on("snap.util.attr.transform", function (a) {
+ this.transform(a), b.stop();
+ })(-1),
+ b.on("snap.util.attr.r", function (a) {
+ "rect" == this.type && (b.stop(), p(this.node, { rx: a, ry: a }));
+ })(-1),
+ b.on("snap.util.attr.textpath", function (a) {
+ if ((b.stop(), "text" == this.type)) {
+ var d, e, f;
+ if (!a && this.textPath) {
+ for (e = this.textPath; e.node.firstChild; )
+ this.node.appendChild(e.node.firstChild);
+ return e.remove(), void delete this.textPath;
+ }
+ if (m(a, "string")) {
+ var g = n(this),
+ h = l(g.parentNode).path(a);
+ g.appendChild(h.node), (d = h.id), h.attr({ id: d });
+ } else
+ (a = l(a)),
+ a instanceof c && ((d = a.attr("id")), d || ((d = a.id), a.attr({ id: d })));
+ if (d)
+ if (((e = this.textPath), (f = this.node), e)) e.attr({ "xlink:href": "#" + d });
+ else {
+ for (e = p("textPath", { "xlink:href": "#" + d }); f.firstChild; )
+ e.appendChild(f.firstChild);
+ f.appendChild(e), (this.textPath = l(e));
+ }
+ }
+ })(-1),
+ b.on("snap.util.attr.text", function (a) {
+ if ("text" == this.type) {
+ for (
+ var c = this.node,
+ d = function (a) {
+ var b = p("tspan");
+ if (m(a, "array")) for (var c = 0; c < a.length; c++) b.appendChild(d(a[c]));
+ else b.appendChild(e.doc.createTextNode(a));
+ return b.normalize && b.normalize(), b;
+ };
+ c.firstChild;
+
+ )
+ c.removeChild(c.firstChild);
+ for (var f = d(a); f.firstChild; ) c.appendChild(f.firstChild);
+ }
+ b.stop();
+ })(-1),
+ b.on("snap.util.attr.fontSize", h)(-1),
+ b.on("snap.util.attr.font-size", h)(-1),
+ b.on("snap.util.getattr.transform", function () {
+ return b.stop(), this.transform();
+ })(-1),
+ b.on("snap.util.getattr.textpath", function () {
+ return b.stop(), this.textPath;
+ })(-1),
+ (function () {
+ function c(c) {
+ return function () {
+ b.stop();
+ var d = e.doc.defaultView
+ .getComputedStyle(this.node, null)
+ .getPropertyValue("marker-" + c);
+ return "none" == d ? d : a(e.doc.getElementById(d.match(o)[1]));
+ };
+ }
+ function d(a) {
+ return function (c) {
+ b.stop();
+ var d = "marker" + a.charAt(0).toUpperCase() + a.substring(1);
+ if ("" == c || !c) return void (this.node.style[d] = "none");
+ if ("marker" == c.type) {
+ var e = c.node.id;
+ return e || p(c.node, { id: c.id }), void (this.node.style[d] = q(e));
+ }
+ };
+ }
+ b.on("snap.util.getattr.marker-end", c("end"))(-1),
+ b.on("snap.util.getattr.markerEnd", c("end"))(-1),
+ b.on("snap.util.getattr.marker-start", c("start"))(-1),
+ b.on("snap.util.getattr.markerStart", c("start"))(-1),
+ b.on("snap.util.getattr.marker-mid", c("mid"))(-1),
+ b.on("snap.util.getattr.markerMid", c("mid"))(-1),
+ b.on("snap.util.attr.marker-end", d("end"))(-1),
+ b.on("snap.util.attr.markerEnd", d("end"))(-1),
+ b.on("snap.util.attr.marker-start", d("start"))(-1),
+ b.on("snap.util.attr.markerStart", d("start"))(-1),
+ b.on("snap.util.attr.marker-mid", d("mid"))(-1),
+ b.on("snap.util.attr.markerMid", d("mid"))(-1);
+ })(),
+ b.on("snap.util.getattr.r", function () {
+ return "rect" == this.type && p(this.node, "rx") == p(this.node, "ry")
+ ? (b.stop(), p(this.node, "rx"))
+ : void 0;
+ })(-1),
+ b.on("snap.util.getattr.text", function () {
+ if ("text" == this.type || "tspan" == this.type) {
+ b.stop();
+ var a = i(this.node);
+ return 1 == a.length ? a[0] : a;
+ }
+ })(-1),
+ b.on("snap.util.getattr.#text", function () {
+ return this.node.textContent;
+ })(-1),
+ b.on("snap.util.getattr.fill", function (c) {
+ if (!c) {
+ b.stop();
+ var d = b("snap.util.getattr.fill", this, !0).firstDefined();
+ return a(a.deurl(d)) || d;
+ }
+ })(-1),
+ b.on("snap.util.getattr.stroke", function (c) {
+ if (!c) {
+ b.stop();
+ var d = b("snap.util.getattr.stroke", this, !0).firstDefined();
+ return a(a.deurl(d)) || d;
+ }
+ })(-1),
+ b.on("snap.util.getattr.viewBox", function () {
+ b.stop();
+ var c = p(this.node, "viewBox");
+ return c ? ((c = c.split(s)), a._.box(+c[0], +c[1], +c[2], +c[3])) : void 0;
+ })(-1),
+ b.on("snap.util.getattr.points", function () {
+ var a = p(this.node, "points");
+ return b.stop(), a ? a.split(s) : void 0;
+ })(-1),
+ b.on("snap.util.getattr.path", function () {
+ var a = p(this.node, "d");
+ return b.stop(), a;
+ })(-1),
+ b.on("snap.util.getattr.class", function () {
+ return this.node.className.baseVal;
+ })(-1),
+ b.on("snap.util.getattr.fontSize", j)(-1),
+ b.on("snap.util.getattr.font-size", j)(-1);
+ }),
+ d.plugin(function (a, b, c, d, e) {
+ var f = /\S+/g,
+ g = String,
+ h = b.prototype;
+ (h.addClass = function (a) {
+ var b,
+ c,
+ d,
+ e,
+ h = g(a || "").match(f) || [],
+ i = this.node,
+ j = i.className.baseVal,
+ k = j.match(f) || [];
+ if (h.length) {
+ for (b = 0; (d = h[b++]); ) (c = k.indexOf(d)), ~c || k.push(d);
+ (e = k.join(" ")), j != e && (i.className.baseVal = e);
+ }
+ return this;
+ }),
+ (h.removeClass = function (a) {
+ var b,
+ c,
+ d,
+ e,
+ h = g(a || "").match(f) || [],
+ i = this.node,
+ j = i.className.baseVal,
+ k = j.match(f) || [];
+ if (k.length) {
+ for (b = 0; (d = h[b++]); ) (c = k.indexOf(d)), ~c && k.splice(c, 1);
+ (e = k.join(" ")), j != e && (i.className.baseVal = e);
+ }
+ return this;
+ }),
+ (h.hasClass = function (a) {
+ var b = this.node,
+ c = b.className.baseVal,
+ d = c.match(f) || [];
+ return !!~d.indexOf(a);
+ }),
+ (h.toggleClass = function (a, b) {
+ if (null != b) return b ? this.addClass(a) : this.removeClass(a);
+ var c,
+ d,
+ e,
+ g,
+ h = (a || "").match(f) || [],
+ i = this.node,
+ j = i.className.baseVal,
+ k = j.match(f) || [];
+ for (c = 0; (e = h[c++]); ) (d = k.indexOf(e)), ~d ? k.splice(d, 1) : k.push(e);
+ return (g = k.join(" ")), j != g && (i.className.baseVal = g), this;
+ });
+ }),
+ d.plugin(function (a, c, d, e, f) {
+ function g(a) {
+ return a;
+ }
+ function h(a) {
+ return function (b) {
+ return +b.toFixed(3) + a;
+ };
+ }
+ var i = {
+ "+": function (a, b) {
+ return a + b;
+ },
+ "-": function (a, b) {
+ return a - b;
+ },
+ "/": function (a, b) {
+ return a / b;
+ },
+ "*": function (a, b) {
+ return a * b;
+ },
+ },
+ j = String,
+ k = /[a-z]+$/i,
+ l = /^\s*([+\-\/*])\s*=\s*([\d.eE+\-]+)\s*([^\d\s]+)?\s*$/;
+ b.on("snap.util.attr", function (a) {
+ var c = j(a).match(l);
+ if (c) {
+ var d = b.nt(),
+ e = d.substring(d.lastIndexOf(".") + 1),
+ f = this.attr(e),
+ g = {};
+ b.stop();
+ var h = c[3] || "",
+ m = f.match(k),
+ n = i[c[1]];
+ if (
+ (m && m == h
+ ? (a = n(parseFloat(f), +c[2]))
+ : ((f = this.asPX(e)), (a = n(this.asPX(e), this.asPX(e, c[2] + h)))),
+ isNaN(f) || isNaN(a))
+ )
+ return;
+ (g[e] = a), this.attr(g);
+ }
+ })(-10),
+ b.on("snap.util.equal", function (a, c) {
+ var d = j(this.attr(a) || ""),
+ e = j(c).match(l);
+ if (e) {
+ b.stop();
+ var f = e[3] || "",
+ m = d.match(k),
+ n = i[e[1]];
+ return m && m == f
+ ? { from: parseFloat(d), to: n(parseFloat(d), +e[2]), f: h(m) }
+ : ((d = this.asPX(a)), { from: d, to: n(d, this.asPX(a, e[2] + f)), f: g });
+ }
+ })(-10);
+ }),
+ d.plugin(function (c, d, e, f, g) {
+ var h = e.prototype,
+ i = c.is;
+ (h.rect = function (a, b, c, d, e, f) {
+ var g;
+ return (
+ null == f && (f = e),
+ i(a, "object") && "[object Object]" == a
+ ? (g = a)
+ : null != a &&
+ ((g = { x: a, y: b, width: c, height: d }), null != e && ((g.rx = e), (g.ry = f))),
+ this.el("rect", g)
+ );
+ }),
+ (h.circle = function (a, b, c) {
+ var d;
+ return (
+ i(a, "object") && "[object Object]" == a
+ ? (d = a)
+ : null != a && (d = { cx: a, cy: b, r: c }),
+ this.el("circle", d)
+ );
+ });
+ var j = (function () {
+ function a() {
+ this.parentNode.removeChild(this);
+ }
+ return function (b, c) {
+ var d = f.doc.createElement("img"),
+ e = f.doc.body;
+ (d.style.cssText = "position:absolute;left:-9999em;top:-9999em"),
+ (d.onload = function () {
+ c.call(d), (d.onload = d.onerror = null), e.removeChild(d);
+ }),
+ (d.onerror = a),
+ e.appendChild(d),
+ (d.src = b);
+ };
+ })();
+ (h.image = function (a, b, d, e, f) {
+ var g = this.el("image");
+ if (i(a, "object") && "src" in a) g.attr(a);
+ else if (null != a) {
+ var h = { "xlink:href": a, preserveAspectRatio: "none" };
+ null != b && null != d && ((h.x = b), (h.y = d)),
+ null != e && null != f
+ ? ((h.width = e), (h.height = f))
+ : j(a, function () {
+ c._.$(g.node, { width: this.offsetWidth, height: this.offsetHeight });
+ }),
+ c._.$(g.node, h);
+ }
+ return g;
+ }),
+ (h.ellipse = function (a, b, c, d) {
+ var e;
+ return (
+ i(a, "object") && "[object Object]" == a
+ ? (e = a)
+ : null != a && (e = { cx: a, cy: b, rx: c, ry: d }),
+ this.el("ellipse", e)
+ );
+ }),
+ (h.path = function (a) {
+ var b;
+ return (
+ i(a, "object") && !i(a, "array") ? (b = a) : a && (b = { d: a }), this.el("path", b)
+ );
+ }),
+ (h.group = h.g =
+ function (a) {
+ var b = this.el("g");
+ return (
+ 1 == arguments.length && a && !a.type
+ ? b.attr(a)
+ : arguments.length && b.add(Array.prototype.slice.call(arguments, 0)),
+ b
+ );
+ }),
+ (h.svg = function (a, b, c, d, e, f, g, h) {
+ var j = {};
+ return (
+ i(a, "object") && null == b
+ ? (j = a)
+ : (null != a && (j.x = a),
+ null != b && (j.y = b),
+ null != c && (j.width = c),
+ null != d && (j.height = d),
+ null != e && null != f && null != g && null != h && (j.viewBox = [e, f, g, h])),
+ this.el("svg", j)
+ );
+ }),
+ (h.mask = function (a) {
+ var b = this.el("mask");
+ return (
+ 1 == arguments.length && a && !a.type
+ ? b.attr(a)
+ : arguments.length && b.add(Array.prototype.slice.call(arguments, 0)),
+ b
+ );
+ }),
+ (h.ptrn = function (a, b, c, d, e, f, g, h) {
+ if (i(a, "object")) var j = a;
+ else
+ (j = { patternUnits: "userSpaceOnUse" }),
+ a && (j.x = a),
+ b && (j.y = b),
+ null != c && (j.width = c),
+ null != d && (j.height = d),
+ null != e && null != f && null != g && null != h
+ ? (j.viewBox = [e, f, g, h])
+ : (j.viewBox = [a || 0, b || 0, c || 0, d || 0]);
+ return this.el("pattern", j);
+ }),
+ (h.use = function (a) {
+ return null != a
+ ? (a instanceof d && (a.attr("id") || a.attr({ id: c._.id(a) }), (a = a.attr("id"))),
+ "#" == String(a).charAt() && (a = a.substring(1)),
+ this.el("use", { "xlink:href": "#" + a }))
+ : d.prototype.use.call(this);
+ }),
+ (h.symbol = function (a, b, c, d) {
+ var e = {};
+ return (
+ null != a && null != b && null != c && null != d && (e.viewBox = [a, b, c, d]),
+ this.el("symbol", e)
+ );
+ }),
+ (h.text = function (a, b, c) {
+ var d = {};
+ return (
+ i(a, "object") ? (d = a) : null != a && (d = { x: a, y: b, text: c || "" }),
+ this.el("text", d)
+ );
+ }),
+ (h.line = function (a, b, c, d) {
+ var e = {};
+ return (
+ i(a, "object") ? (e = a) : null != a && (e = { x1: a, x2: c, y1: b, y2: d }),
+ this.el("line", e)
+ );
+ }),
+ (h.polyline = function (a) {
+ arguments.length > 1 && (a = Array.prototype.slice.call(arguments, 0));
+ var b = {};
+ return (
+ i(a, "object") && !i(a, "array") ? (b = a) : null != a && (b = { points: a }),
+ this.el("polyline", b)
+ );
+ }),
+ (h.polygon = function (a) {
+ arguments.length > 1 && (a = Array.prototype.slice.call(arguments, 0));
+ var b = {};
+ return (
+ i(a, "object") && !i(a, "array") ? (b = a) : null != a && (b = { points: a }),
+ this.el("polygon", b)
+ );
+ }),
+ (function () {
+ function d() {
+ return this.selectAll("stop");
+ }
+ function e(a, b) {
+ var d = l("stop"),
+ e = { offset: +b + "%" };
+ (a = c.color(a)),
+ (e["stop-color"] = a.hex),
+ a.opacity < 1 && (e["stop-opacity"] = a.opacity),
+ l(d, e);
+ for (var f, g = this.stops(), h = 0; h < g.length; h++) {
+ var i = parseFloat(g[h].attr("offset"));
+ if (i > b) {
+ this.node.insertBefore(d, g[h].node), (f = !0);
+ break;
+ }
+ }
+ return f || this.node.appendChild(d), this;
+ }
+ function f() {
+ if ("linearGradient" == this.type) {
+ var a = l(this.node, "x1") || 0,
+ b = l(this.node, "x2") || 1,
+ d = l(this.node, "y1") || 0,
+ e = l(this.node, "y2") || 0;
+            return c._.box(a, d, Math.abs(b - a), Math.abs(e - d));
+ }
+ var f = this.node.cx || 0.5,
+ g = this.node.cy || 0.5,
+ h = this.node.r || 0;
+ return c._.box(f - h, g - h, 2 * h, 2 * h);
+ }
+ function g(a) {
+ var d = a,
+ e = this.stops();
+ if (
+ ("string" == typeof a &&
+ (d = b("snap.util.grad.parse", null, "l(0,0,0,1)" + a).firstDefined().stops),
+ c.is(d, "array"))
+ ) {
+ for (var f = 0; f < e.length; f++)
+ if (d[f]) {
+ var g = c.color(d[f].color),
+ h = { offset: d[f].offset + "%" };
+ (h["stop-color"] = g.hex),
+ g.opacity < 1 && (h["stop-opacity"] = g.opacity),
+ e[f].attr(h);
+ } else e[f].remove();
+ for (f = e.length; f < d.length; f++) this.addStop(d[f].color, d[f].offset);
+ return this;
+ }
+ }
+ function i(a, c) {
+ var d,
+ e = b("snap.util.grad.parse", null, c).firstDefined();
+ if (!e) return null;
+ e.params.unshift(a),
+ (d = "l" == e.type.toLowerCase() ? j.apply(0, e.params) : k.apply(0, e.params)),
+ e.type != e.type.toLowerCase() && l(d.node, { gradientUnits: "userSpaceOnUse" });
+ for (var f = e.stops, g = f.length, h = 0; g > h; h++) {
+ var i = f[h];
+ d.addStop(i.color, i.offset);
+ }
+ return d;
+ }
+ function j(a, b, h, i, j) {
+ var k = c._.make("linearGradient", a);
+ return (
+ (k.stops = d),
+ (k.addStop = e),
+ (k.getBBox = f),
+ (k.setStops = g),
+ null != b && l(k.node, { x1: b, y1: h, x2: i, y2: j }),
+ k
+ );
+ }
+ function k(a, b, g, h, i, j) {
+ var k = c._.make("radialGradient", a);
+ return (
+ (k.stops = d),
+ (k.addStop = e),
+ (k.getBBox = f),
+ null != b && l(k.node, { cx: b, cy: g, r: h }),
+ null != i && null != j && l(k.node, { fx: i, fy: j }),
+ k
+ );
+ }
+ var l = c._.$;
+ (h.gradient = function (a) {
+ return i(this.defs, a);
+ }),
+ (h.gradientLinear = function (a, b, c, d) {
+ return j(this.defs, a, b, c, d);
+ }),
+ (h.gradientRadial = function (a, b, c, d, e) {
+ return k(this.defs, a, b, c, d, e);
+ }),
+ (h.toString = function () {
+ var a,
+ b = this.node.ownerDocument,
+ d = b.createDocumentFragment(),
+ e = b.createElement("div"),
+ f = this.node.cloneNode(!0);
+ return (
+ d.appendChild(e),
+ e.appendChild(f),
+ c._.$(f, { xmlns: "http://www.w3.org/2000/svg" }),
+ (a = e.innerHTML),
+ d.removeChild(d.firstChild),
+ a
+ );
+ }),
+ (h.toDataURL = function () {
+ return a && a.btoa
+ ? "data:image/svg+xml;base64," + btoa(unescape(encodeURIComponent(this)))
+ : void 0;
+ }),
+ (h.clear = function () {
+ for (var a, b = this.node.firstChild; b; )
+ (a = b.nextSibling),
+ "defs" != b.tagName ? b.parentNode.removeChild(b) : h.clear.call({ node: b }),
+ (b = a);
+ });
+ })();
+ }),
+ d.plugin(function (a, b, c, d) {
+ function e(a) {
+ var b = (e.ps = e.ps || {});
+ return (
+ b[a] ? (b[a].sleep = 100) : (b[a] = { sleep: 100 }),
+ setTimeout(function () {
+ for (var c in b) b[M](c) && c != a && (b[c].sleep--, !b[c].sleep && delete b[c]);
+ }),
+ b[a]
+ );
+ }
+ function f(a, b, c, d) {
+ return (
+ null == a && (a = b = c = d = 0),
+ null == b && ((b = a.y), (c = a.width), (d = a.height), (a = a.x)),
+ {
+ x: a,
+ y: b,
+ width: c,
+ w: c,
+ height: d,
+ h: d,
+ x2: a + c,
+ y2: b + d,
+ cx: a + c / 2,
+ cy: b + d / 2,
+ r1: P.min(c, d) / 2,
+ r2: P.max(c, d) / 2,
+ r0: P.sqrt(c * c + d * d) / 2,
+ path: y(a, b, c, d),
+ vb: [a, b, c, d].join(" "),
+ }
+ );
+ }
+ function g() {
+ return this.join(",").replace(N, "$1");
+ }
+ function h(a) {
+ var b = L(a);
+ return (b.toString = g), b;
+ }
+ function i(a, b, c, d, e, f, g, h, i) {
+ return null == i
+ ? p(a, b, c, d, e, f, g, h)
+ : k(a, b, c, d, e, f, g, h, q(a, b, c, d, e, f, g, h, i));
+ }
+ function j(c, d) {
+ function e(a) {
+ return +(+a).toFixed(3);
+ }
+ return a._.cacher(
+ function (a, f, g) {
+ a instanceof b && (a = a.attr("d")), (a = G(a));
+ for (var h, j, l, m, n, o = "", p = {}, q = 0, r = 0, s = a.length; s > r; r++) {
+ if (((l = a[r]), "M" == l[0])) (h = +l[1]), (j = +l[2]);
+ else {
+ if (((m = i(h, j, l[1], l[2], l[3], l[4], l[5], l[6])), q + m > f)) {
+ if (d && !p.start) {
+ if (
+ ((n = i(h, j, l[1], l[2], l[3], l[4], l[5], l[6], f - q)),
+ (o += [
+ "C" + e(n.start.x),
+ e(n.start.y),
+ e(n.m.x),
+ e(n.m.y),
+ e(n.x),
+ e(n.y),
+ ]),
+ g)
+ )
+ return o;
+ (p.start = o),
+ (o = [
+ "M" + e(n.x),
+ e(n.y) + "C" + e(n.n.x),
+ e(n.n.y),
+ e(n.end.x),
+ e(n.end.y),
+ e(l[5]),
+ e(l[6]),
+ ].join()),
+ (q += m),
+ (h = +l[5]),
+ (j = +l[6]);
+ continue;
+ }
+ if (!c && !d) return (n = i(h, j, l[1], l[2], l[3], l[4], l[5], l[6], f - q));
+ }
+ (q += m), (h = +l[5]), (j = +l[6]);
+ }
+ o += l.shift() + l;
+ }
+ return (
+ (p.end = o), (n = c ? q : d ? p : k(h, j, l[0], l[1], l[2], l[3], l[4], l[5], 1))
+ );
+ },
+ null,
+ a._.clone
+ );
+ }
+ function k(a, b, c, d, e, f, g, h, i) {
+ var j = 1 - i,
+ k = T(j, 3),
+ l = T(j, 2),
+ m = i * i,
+ n = m * i,
+ o = k * a + 3 * l * i * c + 3 * j * i * i * e + n * g,
+ p = k * b + 3 * l * i * d + 3 * j * i * i * f + n * h,
+ q = a + 2 * i * (c - a) + m * (e - 2 * c + a),
+ r = b + 2 * i * (d - b) + m * (f - 2 * d + b),
+ s = c + 2 * i * (e - c) + m * (g - 2 * e + c),
+ t = d + 2 * i * (f - d) + m * (h - 2 * f + d),
+ u = j * a + i * c,
+ v = j * b + i * d,
+ w = j * e + i * g,
+ x = j * f + i * h,
+ y = 90 - (180 * P.atan2(q - s, r - t)) / Q;
+ return {
+ x: o,
+ y: p,
+ m: { x: q, y: r },
+ n: { x: s, y: t },
+ start: { x: u, y: v },
+ end: { x: w, y: x },
+ alpha: y,
+ };
+ }
+ function l(b, c, d, e, g, h, i, j) {
+ a.is(b, "array") || (b = [b, c, d, e, g, h, i, j]);
+ var k = F.apply(null, b);
+ return f(k.min.x, k.min.y, k.max.x - k.min.x, k.max.y - k.min.y);
+ }
+ function m(a, b, c) {
+ return b >= a.x && b <= a.x + a.width && c >= a.y && c <= a.y + a.height;
+ }
+ function n(a, b) {
+ return (
+ (a = f(a)),
+ (b = f(b)),
+ m(b, a.x, a.y) ||
+ m(b, a.x2, a.y) ||
+ m(b, a.x, a.y2) ||
+ m(b, a.x2, a.y2) ||
+ m(a, b.x, b.y) ||
+ m(a, b.x2, b.y) ||
+ m(a, b.x, b.y2) ||
+ m(a, b.x2, b.y2) ||
+ (((a.x < b.x2 && a.x > b.x) || (b.x < a.x2 && b.x > a.x)) &&
+ ((a.y < b.y2 && a.y > b.y) || (b.y < a.y2 && b.y > a.y)))
+ );
+ }
+ function o(a, b, c, d, e) {
+ var f = -3 * b + 9 * c - 9 * d + 3 * e,
+ g = a * f + 6 * b - 12 * c + 6 * d;
+ return a * g - 3 * b + 3 * c;
+ }
+ function p(a, b, c, d, e, f, g, h, i) {
+ null == i && (i = 1), (i = i > 1 ? 1 : 0 > i ? 0 : i);
+ for (
+ var j = i / 2,
+ k = 12,
+ l = [
+ -0.1252, 0.1252, -0.3678, 0.3678, -0.5873, 0.5873, -0.7699, 0.7699, -0.9041, 0.9041,
+ -0.9816, 0.9816,
+ ],
+ m = [
+ 0.2491, 0.2491, 0.2335, 0.2335, 0.2032, 0.2032, 0.1601, 0.1601, 0.1069, 0.1069,
+ 0.0472, 0.0472,
+ ],
+ n = 0,
+ p = 0;
+ k > p;
+ p++
+ ) {
+ var q = j * l[p] + j,
+ r = o(q, a, c, e, g),
+ s = o(q, b, d, f, h),
+ t = r * r + s * s;
+ n += m[p] * P.sqrt(t);
+ }
+ return j * n;
+ }
+ function q(a, b, c, d, e, f, g, h, i) {
+ if (!(0 > i || p(a, b, c, d, e, f, g, h) < i)) {
+ var j,
+ k = 1,
+ l = k / 2,
+ m = k - l,
+ n = 0.01;
+ for (j = p(a, b, c, d, e, f, g, h, m); U(j - i) > n; )
+ (l /= 2), (m += (i > j ? 1 : -1) * l), (j = p(a, b, c, d, e, f, g, h, m));
+ return m;
+ }
+ }
+ function r(a, b, c, d, e, f, g, h) {
+ if (!(S(a, c) < R(e, g) || R(a, c) > S(e, g) || S(b, d) < R(f, h) || R(b, d) > S(f, h))) {
+ var i = (a * d - b * c) * (e - g) - (a - c) * (e * h - f * g),
+ j = (a * d - b * c) * (f - h) - (b - d) * (e * h - f * g),
+ k = (a - c) * (f - h) - (b - d) * (e - g);
+ if (k) {
+ var l = i / k,
+ m = j / k,
+ n = +l.toFixed(2),
+ o = +m.toFixed(2);
+ if (
+ !(
+ n < +R(a, c).toFixed(2) ||
+ n > +S(a, c).toFixed(2) ||
+ n < +R(e, g).toFixed(2) ||
+ n > +S(e, g).toFixed(2) ||
+ o < +R(b, d).toFixed(2) ||
+ o > +S(b, d).toFixed(2) ||
+ o < +R(f, h).toFixed(2) ||
+ o > +S(f, h).toFixed(2)
+ )
+ )
+ return { x: l, y: m };
+ }
+ }
+ }
+ function s(a, b, c) {
+ var d = l(a),
+ e = l(b);
+ if (!n(d, e)) return c ? 0 : [];
+ for (
+ var f = p.apply(0, a),
+ g = p.apply(0, b),
+ h = ~~(f / 8),
+ i = ~~(g / 8),
+ j = [],
+ m = [],
+ o = {},
+ q = c ? 0 : [],
+ s = 0;
+ h + 1 > s;
+ s++
+ ) {
+ var t = k.apply(0, a.concat(s / h));
+ j.push({ x: t.x, y: t.y, t: s / h });
+ }
+ for (s = 0; i + 1 > s; s++)
+ (t = k.apply(0, b.concat(s / i))), m.push({ x: t.x, y: t.y, t: s / i });
+ for (s = 0; h > s; s++)
+ for (var u = 0; i > u; u++) {
+ var v = j[s],
+ w = j[s + 1],
+ x = m[u],
+ y = m[u + 1],
+ z = U(w.x - v.x) < 0.001 ? "y" : "x",
+ A = U(y.x - x.x) < 0.001 ? "y" : "x",
+ B = r(v.x, v.y, w.x, w.y, x.x, x.y, y.x, y.y);
+ if (B) {
+ if (o[B.x.toFixed(4)] == B.y.toFixed(4)) continue;
+ o[B.x.toFixed(4)] = B.y.toFixed(4);
+ var C = v.t + U((B[z] - v[z]) / (w[z] - v[z])) * (w.t - v.t),
+ D = x.t + U((B[A] - x[A]) / (y[A] - x[A])) * (y.t - x.t);
+ C >= 0 &&
+ 1 >= C &&
+ D >= 0 &&
+ 1 >= D &&
+ (c ? q++ : q.push({ x: B.x, y: B.y, t1: C, t2: D }));
+ }
+ }
+ return q;
+ }
+ function t(a, b) {
+ return v(a, b);
+ }
+ function u(a, b) {
+ return v(a, b, 1);
+ }
+ function v(a, b, c) {
+ (a = G(a)), (b = G(b));
+ for (var d, e, f, g, h, i, j, k, l, m, n = c ? 0 : [], o = 0, p = a.length; p > o; o++) {
+ var q = a[o];
+ if ("M" == q[0]) (d = h = q[1]), (e = i = q[2]);
+ else {
+ "C" == q[0]
+ ? ((l = [d, e].concat(q.slice(1))), (d = l[6]), (e = l[7]))
+ : ((l = [d, e, d, e, h, i, h, i]), (d = h), (e = i));
+ for (var r = 0, t = b.length; t > r; r++) {
+ var u = b[r];
+ if ("M" == u[0]) (f = j = u[1]), (g = k = u[2]);
+ else {
+ "C" == u[0]
+ ? ((m = [f, g].concat(u.slice(1))), (f = m[6]), (g = m[7]))
+ : ((m = [f, g, f, g, j, k, j, k]), (f = j), (g = k));
+ var v = s(l, m, c);
+ if (c) n += v;
+ else {
+ for (var w = 0, x = v.length; x > w; w++)
+ (v[w].segment1 = o), (v[w].segment2 = r), (v[w].bez1 = l), (v[w].bez2 = m);
+ n = n.concat(v);
+ }
+ }
+ }
+ }
+ }
+ return n;
+ }
+ function w(a, b, c) {
+ var d = x(a);
+ return (
+ m(d, b, c) &&
+ v(
+ a,
+ [
+ ["M", b, c],
+ ["H", d.x2 + 10],
+ ],
+ 1
+ ) %
+ 2 ==
+ 1
+ );
+ }
+ function x(a) {
+ var b = e(a);
+ if (b.bbox) return L(b.bbox);
+ if (!a) return f();
+ a = G(a);
+ for (var c, d = 0, g = 0, h = [], i = [], j = 0, k = a.length; k > j; j++)
+ if (((c = a[j]), "M" == c[0])) (d = c[1]), (g = c[2]), h.push(d), i.push(g);
+ else {
+ var l = F(d, g, c[1], c[2], c[3], c[4], c[5], c[6]);
+ (h = h.concat(l.min.x, l.max.x)),
+ (i = i.concat(l.min.y, l.max.y)),
+ (d = c[5]),
+ (g = c[6]);
+ }
+ var m = R.apply(0, h),
+ n = R.apply(0, i),
+ o = S.apply(0, h),
+ p = S.apply(0, i),
+ q = f(m, n, o - m, p - n);
+ return (b.bbox = L(q)), q;
+ }
+ function y(a, b, c, d, e) {
+ if (e)
+ return [
+ ["M", +a + +e, b],
+ ["l", c - 2 * e, 0],
+ ["a", e, e, 0, 0, 1, e, e],
+ ["l", 0, d - 2 * e],
+ ["a", e, e, 0, 0, 1, -e, e],
+ ["l", 2 * e - c, 0],
+ ["a", e, e, 0, 0, 1, -e, -e],
+ ["l", 0, 2 * e - d],
+ ["a", e, e, 0, 0, 1, e, -e],
+ ["z"],
+ ];
+ var f = [["M", a, b], ["l", c, 0], ["l", 0, d], ["l", -c, 0], ["z"]];
+ return (f.toString = g), f;
+ }
+ function z(a, b, c, d, e) {
+ if (
+ (null == e && null == d && (d = c), (a = +a), (b = +b), (c = +c), (d = +d), null != e)
+ )
+ var f = Math.PI / 180,
+ h = a + c * Math.cos(-d * f),
+ i = a + c * Math.cos(-e * f),
+ j = b + c * Math.sin(-d * f),
+ k = b + c * Math.sin(-e * f),
+ l = [
+ ["M", h, j],
+ ["A", c, c, 0, +(e - d > 180), 0, i, k],
+ ];
+ else
+ l = [
+ ["M", a, b],
+ ["m", 0, -d],
+ ["a", c, d, 0, 1, 1, 0, 2 * d],
+ ["a", c, d, 0, 1, 1, 0, -2 * d],
+ ["z"],
+ ];
+ return (l.toString = g), l;
+ }
+ function A(b) {
+ var c = e(b),
+ d = String.prototype.toLowerCase;
+ if (c.rel) return h(c.rel);
+ (a.is(b, "array") && a.is(b && b[0], "array")) || (b = a.parsePathString(b));
+ var f = [],
+ i = 0,
+ j = 0,
+ k = 0,
+ l = 0,
+ m = 0;
+ "M" == b[0][0] &&
+ ((i = b[0][1]), (j = b[0][2]), (k = i), (l = j), m++, f.push(["M", i, j]));
+ for (var n = m, o = b.length; o > n; n++) {
+ var p = (f[n] = []),
+ q = b[n];
+ if (q[0] != d.call(q[0]))
+ switch (((p[0] = d.call(q[0])), p[0])) {
+ case "a":
+ (p[1] = q[1]),
+ (p[2] = q[2]),
+ (p[3] = q[3]),
+ (p[4] = q[4]),
+ (p[5] = q[5]),
+ (p[6] = +(q[6] - i).toFixed(3)),
+ (p[7] = +(q[7] - j).toFixed(3));
+ break;
+ case "v":
+ p[1] = +(q[1] - j).toFixed(3);
+ break;
+ case "m":
+ (k = q[1]), (l = q[2]);
+ default:
+ for (var r = 1, s = q.length; s > r; r++)
+ p[r] = +(q[r] - (r % 2 ? i : j)).toFixed(3);
+ }
+ else {
+ (p = f[n] = []), "m" == q[0] && ((k = q[1] + i), (l = q[2] + j));
+ for (var t = 0, u = q.length; u > t; t++) f[n][t] = q[t];
+ }
+ var v = f[n].length;
+ switch (f[n][0]) {
+ case "z":
+ (i = k), (j = l);
+ break;
+ case "h":
+ i += +f[n][v - 1];
+ break;
+ case "v":
+ j += +f[n][v - 1];
+ break;
+ default:
+ (i += +f[n][v - 2]), (j += +f[n][v - 1]);
+ }
+ }
+ return (f.toString = g), (c.rel = h(f)), f;
+ }
+ function B(b) {
+ var c = e(b);
+ if (c.abs) return h(c.abs);
+ if (
+ ((K(b, "array") && K(b && b[0], "array")) || (b = a.parsePathString(b)),
+ !b || !b.length)
+ )
+ return [["M", 0, 0]];
+ var d,
+ f = [],
+ i = 0,
+ j = 0,
+ k = 0,
+ l = 0,
+ m = 0;
+ "M" == b[0][0] &&
+ ((i = +b[0][1]), (j = +b[0][2]), (k = i), (l = j), m++, (f[0] = ["M", i, j]));
+ for (
+ var n,
+ o,
+ p =
+ 3 == b.length &&
+ "M" == b[0][0] &&
+ "R" == b[1][0].toUpperCase() &&
+ "Z" == b[2][0].toUpperCase(),
+ q = m,
+ r = b.length;
+ r > q;
+ q++
+ ) {
+ if ((f.push((n = [])), (o = b[q]), (d = o[0]), d != d.toUpperCase()))
+ switch (((n[0] = d.toUpperCase()), n[0])) {
+ case "A":
+ (n[1] = o[1]),
+ (n[2] = o[2]),
+ (n[3] = o[3]),
+ (n[4] = o[4]),
+ (n[5] = o[5]),
+ (n[6] = +o[6] + i),
+ (n[7] = +o[7] + j);
+ break;
+ case "V":
+ n[1] = +o[1] + j;
+ break;
+ case "H":
+ n[1] = +o[1] + i;
+ break;
+ case "R":
+ for (var s = [i, j].concat(o.slice(1)), t = 2, u = s.length; u > t; t++)
+ (s[t] = +s[t] + i), (s[++t] = +s[t] + j);
+ f.pop(), (f = f.concat(I(s, p)));
+ break;
+ case "O":
+ f.pop(), (s = z(i, j, o[1], o[2])), s.push(s[0]), (f = f.concat(s));
+ break;
+ case "U":
+ f.pop(),
+ (f = f.concat(z(i, j, o[1], o[2], o[3]))),
+ (n = ["U"].concat(f[f.length - 1].slice(-2)));
+ break;
+ case "M":
+ (k = +o[1] + i), (l = +o[2] + j);
+ default:
+ for (t = 1, u = o.length; u > t; t++) n[t] = +o[t] + (t % 2 ? i : j);
+ }
+ else if ("R" == d)
+ (s = [i, j].concat(o.slice(1))),
+ f.pop(),
+ (f = f.concat(I(s, p))),
+ (n = ["R"].concat(o.slice(-2)));
+ else if ("O" == d) f.pop(), (s = z(i, j, o[1], o[2])), s.push(s[0]), (f = f.concat(s));
+ else if ("U" == d)
+ f.pop(),
+ (f = f.concat(z(i, j, o[1], o[2], o[3]))),
+ (n = ["U"].concat(f[f.length - 1].slice(-2)));
+ else for (var v = 0, w = o.length; w > v; v++) n[v] = o[v];
+ if (((d = d.toUpperCase()), "O" != d))
+ switch (n[0]) {
+ case "Z":
+ (i = +k), (j = +l);
+ break;
+ case "H":
+ i = n[1];
+ break;
+ case "V":
+ j = n[1];
+ break;
+ case "M":
+ (k = n[n.length - 2]), (l = n[n.length - 1]);
+ default:
+ (i = n[n.length - 2]), (j = n[n.length - 1]);
+ }
+ }
+ return (f.toString = g), (c.abs = h(f)), f;
+ }
+ function C(a, b, c, d) {
+ return [a, b, c, d, c, d];
+ }
+ function D(a, b, c, d, e, f) {
+ var g = 1 / 3,
+ h = 2 / 3;
+ return [g * a + h * c, g * b + h * d, g * e + h * c, g * f + h * d, e, f];
+ }
+ function E(b, c, d, e, f, g, h, i, j, k) {
+ var l,
+ m = (120 * Q) / 180,
+ n = (Q / 180) * (+f || 0),
+ o = [],
+ p = a._.cacher(function (a, b, c) {
+ var d = a * P.cos(c) - b * P.sin(c),
+ e = a * P.sin(c) + b * P.cos(c);
+ return { x: d, y: e };
+ });
+ if (!d || !e) return [b, c, i, j, i, j];
+ if (k) (y = k[0]), (z = k[1]), (w = k[2]), (x = k[3]);
+ else {
+ (l = p(b, c, -n)), (b = l.x), (c = l.y), (l = p(i, j, -n)), (i = l.x), (j = l.y);
+ var q = (P.cos((Q / 180) * f), P.sin((Q / 180) * f), (b - i) / 2),
+ r = (c - j) / 2,
+ s = (q * q) / (d * d) + (r * r) / (e * e);
+ s > 1 && ((s = P.sqrt(s)), (d = s * d), (e = s * e));
+ var t = d * d,
+ u = e * e,
+ v =
+ (g == h ? -1 : 1) *
+ P.sqrt(U((t * u - t * r * r - u * q * q) / (t * r * r + u * q * q))),
+ w = (v * d * r) / e + (b + i) / 2,
+ x = (v * -e * q) / d + (c + j) / 2,
+ y = P.asin(((c - x) / e).toFixed(9)),
+ z = P.asin(((j - x) / e).toFixed(9));
+ (y = w > b ? Q - y : y),
+ (z = w > i ? Q - z : z),
+ 0 > y && (y = 2 * Q + y),
+ 0 > z && (z = 2 * Q + z),
+ h && y > z && (y -= 2 * Q),
+ !h && z > y && (z -= 2 * Q);
+ }
+ var A = z - y;
+ if (U(A) > m) {
+ var B = z,
+ C = i,
+ D = j;
+ (z = y + m * (h && z > y ? 1 : -1)),
+ (i = w + d * P.cos(z)),
+ (j = x + e * P.sin(z)),
+ (o = E(i, j, d, e, f, 0, h, C, D, [z, B, w, x]));
+ }
+ A = z - y;
+ var F = P.cos(y),
+ G = P.sin(y),
+ H = P.cos(z),
+ I = P.sin(z),
+ J = P.tan(A / 4),
+ K = (4 / 3) * d * J,
+ L = (4 / 3) * e * J,
+ M = [b, c],
+ N = [b + K * G, c - L * F],
+ O = [i + K * I, j - L * H],
+ R = [i, j];
+ if (((N[0] = 2 * M[0] - N[0]), (N[1] = 2 * M[1] - N[1]), k)) return [N, O, R].concat(o);
+ o = [N, O, R].concat(o).join().split(",");
+ for (var S = [], T = 0, V = o.length; V > T; T++)
+ S[T] = T % 2 ? p(o[T - 1], o[T], n).y : p(o[T], o[T + 1], n).x;
+ return S;
+ }
+ function F(a, b, c, d, e, f, g, h) {
+ for (var i, j, k, l, m, n, o, p, q = [], r = [[], []], s = 0; 2 > s; ++s)
+ if (
+ (0 == s
+ ? ((j = 6 * a - 12 * c + 6 * e),
+ (i = -3 * a + 9 * c - 9 * e + 3 * g),
+ (k = 3 * c - 3 * a))
+ : ((j = 6 * b - 12 * d + 6 * f),
+ (i = -3 * b + 9 * d - 9 * f + 3 * h),
+ (k = 3 * d - 3 * b)),
+ U(i) < 1e-12)
+ ) {
+ if (U(j) < 1e-12) continue;
+ (l = -k / j), l > 0 && 1 > l && q.push(l);
+ } else
+ (o = j * j - 4 * k * i),
+ (p = P.sqrt(o)),
+ 0 > o ||
+ ((m = (-j + p) / (2 * i)),
+ m > 0 && 1 > m && q.push(m),
+ (n = (-j - p) / (2 * i)),
+ n > 0 && 1 > n && q.push(n));
+ for (var t, u = q.length, v = u; u--; )
+ (l = q[u]),
+ (t = 1 - l),
+ (r[0][u] = t * t * t * a + 3 * t * t * l * c + 3 * t * l * l * e + l * l * l * g),
+ (r[1][u] = t * t * t * b + 3 * t * t * l * d + 3 * t * l * l * f + l * l * l * h);
+ return (
+ (r[0][v] = a),
+ (r[1][v] = b),
+ (r[0][v + 1] = g),
+ (r[1][v + 1] = h),
+ (r[0].length = r[1].length = v + 2),
+ {
+ min: { x: R.apply(0, r[0]), y: R.apply(0, r[1]) },
+ max: { x: S.apply(0, r[0]), y: S.apply(0, r[1]) },
+ }
+ );
+ }
+ function G(a, b) {
+ var c = !b && e(a);
+ if (!b && c.curve) return h(c.curve);
+ for (
+ var d = B(a),
+ f = b && B(b),
+ g = { x: 0, y: 0, bx: 0, by: 0, X: 0, Y: 0, qx: null, qy: null },
+ i = { x: 0, y: 0, bx: 0, by: 0, X: 0, Y: 0, qx: null, qy: null },
+ j = function (a, b, c) {
+ var d, e;
+ if (!a) return ["C", b.x, b.y, b.x, b.y, b.x, b.y];
+ switch ((!(a[0] in { T: 1, Q: 1 }) && (b.qx = b.qy = null), a[0])) {
+ case "M":
+ (b.X = a[1]), (b.Y = a[2]);
+ break;
+ case "A":
+ a = ["C"].concat(E.apply(0, [b.x, b.y].concat(a.slice(1))));
+ break;
+ case "S":
+ "C" == c || "S" == c
+ ? ((d = 2 * b.x - b.bx), (e = 2 * b.y - b.by))
+ : ((d = b.x), (e = b.y)),
+ (a = ["C", d, e].concat(a.slice(1)));
+ break;
+ case "T":
+ "Q" == c || "T" == c
+ ? ((b.qx = 2 * b.x - b.qx), (b.qy = 2 * b.y - b.qy))
+ : ((b.qx = b.x), (b.qy = b.y)),
+ (a = ["C"].concat(D(b.x, b.y, b.qx, b.qy, a[1], a[2])));
+ break;
+ case "Q":
+ (b.qx = a[1]),
+ (b.qy = a[2]),
+ (a = ["C"].concat(D(b.x, b.y, a[1], a[2], a[3], a[4])));
+ break;
+ case "L":
+ a = ["C"].concat(C(b.x, b.y, a[1], a[2]));
+ break;
+ case "H":
+ a = ["C"].concat(C(b.x, b.y, a[1], b.y));
+ break;
+ case "V":
+ a = ["C"].concat(C(b.x, b.y, b.x, a[1]));
+ break;
+ case "Z":
+ a = ["C"].concat(C(b.x, b.y, b.X, b.Y));
+ }
+ return a;
+ },
+ k = function (a, b) {
+ if (a[b].length > 7) {
+ a[b].shift();
+ for (var c = a[b]; c.length; )
+ (m[b] = "A"), f && (n[b] = "A"), a.splice(b++, 0, ["C"].concat(c.splice(0, 6)));
+ a.splice(b, 1), (r = S(d.length, (f && f.length) || 0));
+ }
+ },
+ l = function (a, b, c, e, g) {
+ a &&
+ b &&
+ "M" == a[g][0] &&
+ "M" != b[g][0] &&
+ (b.splice(g, 0, ["M", e.x, e.y]),
+ (c.bx = 0),
+ (c.by = 0),
+ (c.x = a[g][1]),
+ (c.y = a[g][2]),
+ (r = S(d.length, (f && f.length) || 0)));
+ },
+ m = [],
+ n = [],
+ o = "",
+ p = "",
+ q = 0,
+ r = S(d.length, (f && f.length) || 0);
+ r > q;
+ q++
+ ) {
+ d[q] && (o = d[q][0]),
+ "C" != o && ((m[q] = o), q && (p = m[q - 1])),
+ (d[q] = j(d[q], g, p)),
+ "A" != m[q] && "C" == o && (m[q] = "C"),
+ k(d, q),
+ f &&
+ (f[q] && (o = f[q][0]),
+ "C" != o && ((n[q] = o), q && (p = n[q - 1])),
+ (f[q] = j(f[q], i, p)),
+ "A" != n[q] && "C" == o && (n[q] = "C"),
+ k(f, q)),
+ l(d, f, g, i, q),
+ l(f, d, i, g, q);
+ var s = d[q],
+ t = f && f[q],
+ u = s.length,
+ v = f && t.length;
+ (g.x = s[u - 2]),
+ (g.y = s[u - 1]),
+ (g.bx = O(s[u - 4]) || g.x),
+ (g.by = O(s[u - 3]) || g.y),
+ (i.bx = f && (O(t[v - 4]) || i.x)),
+ (i.by = f && (O(t[v - 3]) || i.y)),
+ (i.x = f && t[v - 2]),
+ (i.y = f && t[v - 1]);
+ }
+ return f || (c.curve = h(d)), f ? [d, f] : d;
+ }
+ function H(a, b) {
+ if (!b) return a;
+ var c, d, e, f, g, h, i;
+ for (a = G(a), e = 0, g = a.length; g > e; e++)
+ for (i = a[e], f = 1, h = i.length; h > f; f += 2)
+ (c = b.x(i[f], i[f + 1])), (d = b.y(i[f], i[f + 1])), (i[f] = c), (i[f + 1] = d);
+ return a;
+ }
+ function I(a, b) {
+ for (var c = [], d = 0, e = a.length; e - 2 * !b > d; d += 2) {
+ var f = [
+ { x: +a[d - 2], y: +a[d - 1] },
+ { x: +a[d], y: +a[d + 1] },
+ { x: +a[d + 2], y: +a[d + 3] },
+ { x: +a[d + 4], y: +a[d + 5] },
+ ];
+ b
+ ? d
+ ? e - 4 == d
+ ? (f[3] = { x: +a[0], y: +a[1] })
+ : e - 2 == d && ((f[2] = { x: +a[0], y: +a[1] }), (f[3] = { x: +a[2], y: +a[3] }))
+ : (f[0] = { x: +a[e - 2], y: +a[e - 1] })
+ : e - 4 == d
+ ? (f[3] = f[2])
+ : d || (f[0] = { x: +a[d], y: +a[d + 1] }),
+ c.push([
+ "C",
+ (-f[0].x + 6 * f[1].x + f[2].x) / 6,
+ (-f[0].y + 6 * f[1].y + f[2].y) / 6,
+ (f[1].x + 6 * f[2].x - f[3].x) / 6,
+ (f[1].y + 6 * f[2].y - f[3].y) / 6,
+ f[2].x,
+ f[2].y,
+ ]);
+ }
+ return c;
+ }
+ var J = b.prototype,
+ K = a.is,
+ L = a._.clone,
+ M = "hasOwnProperty",
+ N = /,?([a-z]),?/gi,
+ O = parseFloat,
+ P = Math,
+ Q = P.PI,
+ R = P.min,
+ S = P.max,
+ T = P.pow,
+ U = P.abs,
+ V = j(1),
+ W = j(),
+ X = j(0, 1),
+ Y = a._unit2px,
+ Z = {
+ path: function (a) {
+ return a.attr("path");
+ },
+ circle: function (a) {
+ var b = Y(a);
+ return z(b.cx, b.cy, b.r);
+ },
+ ellipse: function (a) {
+ var b = Y(a);
+ return z(b.cx || 0, b.cy || 0, b.rx, b.ry);
+ },
+ rect: function (a) {
+ var b = Y(a);
+ return y(b.x || 0, b.y || 0, b.width, b.height, b.rx, b.ry);
+ },
+ image: function (a) {
+ var b = Y(a);
+ return y(b.x || 0, b.y || 0, b.width, b.height);
+ },
+ line: function (a) {
+ return "M" + [a.attr("x1") || 0, a.attr("y1") || 0, a.attr("x2"), a.attr("y2")];
+ },
+ polyline: function (a) {
+ return "M" + a.attr("points");
+ },
+ polygon: function (a) {
+ return "M" + a.attr("points") + "z";
+ },
+ deflt: function (a) {
+ var b = a.node.getBBox();
+ return y(b.x, b.y, b.width, b.height);
+ },
+ };
+ (a.path = e),
+ (a.path.getTotalLength = V),
+ (a.path.getPointAtLength = W),
+ (a.path.getSubpath = function (a, b, c) {
+ if (this.getTotalLength(a) - c < 1e-6) return X(a, b).end;
+ var d = X(a, c, 1);
+ return b ? X(d, b).end : d;
+ }),
+ (J.getTotalLength = function () {
+ return this.node.getTotalLength ? this.node.getTotalLength() : void 0;
+ }),
+ (J.getPointAtLength = function (a) {
+ return W(this.attr("d"), a);
+ }),
+ (J.getSubpath = function (b, c) {
+ return a.path.getSubpath(this.attr("d"), b, c);
+ }),
+ (a._.box = f),
+ (a.path.findDotsAtSegment = k),
+ (a.path.bezierBBox = l),
+ (a.path.isPointInsideBBox = m),
+ (a.closest = function (b, c, d, e) {
+ for (
+ var g = 100,
+ h = f(b - g / 2, c - g / 2, g, g),
+ i = [],
+ j = d[0].hasOwnProperty("x")
+ ? function (a) {
+ return { x: d[a].x, y: d[a].y };
+ }
+ : function (a) {
+ return { x: d[a], y: e[a] };
+ },
+ k = 0;
+ 1e6 >= g && !k;
+
+ ) {
+ for (var l = 0, n = d.length; n > l; l++) {
+ var o = j(l);
+ if (m(h, o.x, o.y)) {
+ k++, i.push(o);
+ break;
+ }
+ }
+ k || ((g *= 2), (h = f(b - g / 2, c - g / 2, g, g)));
+ }
+ if (1e6 != g) {
+ var p,
+ q = 1 / 0;
+ for (l = 0, n = i.length; n > l; l++) {
+ var r = a.len(b, c, i[l].x, i[l].y);
+ q > r && ((q = r), (i[l].len = r), (p = i[l]));
+ }
+ return p;
+ }
+ }),
+ (a.path.isBBoxIntersect = n),
+ (a.path.intersection = t),
+ (a.path.intersectionNumber = u),
+ (a.path.isPointInside = w),
+ (a.path.getBBox = x),
+ (a.path.get = Z),
+ (a.path.toRelative = A),
+ (a.path.toAbsolute = B),
+ (a.path.toCubic = G),
+ (a.path.map = H),
+ (a.path.toString = g),
+ (a.path.clone = h);
+ }),
+ d.plugin(function (a, d, e, f) {
+ var g = Math.max,
+ h = Math.min,
+ i = function (a) {
+ if (
+ ((this.items = []), (this.bindings = {}), (this.length = 0), (this.type = "set"), a)
+ )
+ for (var b = 0, c = a.length; c > b; b++)
+ a[b] &&
+ ((this[this.items.length] = this.items[this.items.length] = a[b]), this.length++);
+ },
+ j = i.prototype;
+ (j.push = function () {
+ for (var a, b, c = 0, d = arguments.length; d > c; c++)
+ (a = arguments[c]),
+ a && ((b = this.items.length), (this[b] = this.items[b] = a), this.length++);
+ return this;
+ }),
+ (j.pop = function () {
+ return this.length && delete this[this.length--], this.items.pop();
+ }),
+ (j.forEach = function (a, b) {
+ for (var c = 0, d = this.items.length; d > c; c++)
+ if (a.call(b, this.items[c], c) === !1) return this;
+ return this;
+ }),
+ (j.animate = function (d, e, f, g) {
+ "function" != typeof f || f.length || ((g = f), (f = c.linear)),
+ d instanceof a._.Animation &&
+ ((g = d.callback), (f = d.easing), (e = f.dur), (d = d.attr));
+ var h = arguments;
+ if (a.is(d, "array") && a.is(h[h.length - 1], "array")) var i = !0;
+ var j,
+ k = function () {
+ j ? (this.b = j) : (j = this.b);
+ },
+ l = 0,
+ m = this,
+ n =
+ g &&
+ function () {
+ ++l == m.length && g.call(this);
+ };
+ return this.forEach(function (a, c) {
+ b.once("snap.animcreated." + a.id, k),
+ i ? h[c] && a.animate.apply(a, h[c]) : a.animate(d, e, f, n);
+ });
+ }),
+ (j.remove = function () {
+ for (; this.length; ) this.pop().remove();
+ return this;
+ }),
+ (j.bind = function (a, b, c) {
+ var d = {};
+ if ("function" == typeof b) this.bindings[a] = b;
+ else {
+ var e = c || a;
+ this.bindings[a] = function (a) {
+ (d[e] = a), b.attr(d);
+ };
+ }
+ return this;
+ }),
+ (j.attr = function (a) {
+ var b = {};
+ for (var c in a) this.bindings[c] ? this.bindings[c](a[c]) : (b[c] = a[c]);
+ for (var d = 0, e = this.items.length; e > d; d++) this.items[d].attr(b);
+ return this;
+ }),
+ (j.clear = function () {
+ for (; this.length; ) this.pop();
+ }),
+ (j.splice = function (a, b, c) {
+ (a = 0 > a ? g(this.length + a, 0) : a), (b = g(0, h(this.length - a, b)));
+ var d,
+ e = [],
+ f = [],
+ j = [];
+ for (d = 2; d < arguments.length; d++) j.push(arguments[d]);
+ for (d = 0; b > d; d++) f.push(this[a + d]);
+ for (; d < this.length - a; d++) e.push(this[a + d]);
+ var k = j.length;
+ for (d = 0; d < k + e.length; d++)
+ this.items[a + d] = this[a + d] = k > d ? j[d] : e[d - k];
+ for (d = this.items.length = this.length -= b - k; this[d]; ) delete this[d++];
+ return new i(f);
+ }),
+ (j.exclude = function (a) {
+ for (var b = 0, c = this.length; c > b; b++)
+ if (this[b] == a) return this.splice(b, 1), !0;
+ return !1;
+ }),
+ (j.insertAfter = function (a) {
+ for (var b = this.items.length; b--; ) this.items[b].insertAfter(a);
+ return this;
+ }),
+ (j.getBBox = function () {
+ for (var a = [], b = [], c = [], d = [], e = this.items.length; e--; )
+ if (!this.items[e].removed) {
+ var f = this.items[e].getBBox();
+ a.push(f.x), b.push(f.y), c.push(f.x + f.width), d.push(f.y + f.height);
+ }
+ return (
+ (a = h.apply(0, a)),
+ (b = h.apply(0, b)),
+ (c = g.apply(0, c)),
+ (d = g.apply(0, d)),
+ {
+ x: a,
+ y: b,
+ x2: c,
+ y2: d,
+ width: c - a,
+ height: d - b,
+ cx: a + (c - a) / 2,
+ cy: b + (d - b) / 2,
+ }
+ );
+ }),
+ (j.clone = function (a) {
+ a = new i();
+ for (var b = 0, c = this.items.length; c > b; b++) a.push(this.items[b].clone());
+ return a;
+ }),
+ (j.toString = function () {
+ return "Snap‘s set";
+ }),
+ (j.type = "set"),
+ (a.Set = i),
+ (a.set = function () {
+ var a = new i();
+ return arguments.length && a.push.apply(a, Array.prototype.slice.call(arguments, 0)), a;
+ });
+ }),
+ d.plugin(function (a, c, d, e) {
+ function f(a) {
+ var b = a[0];
+ switch (b.toLowerCase()) {
+ case "t":
+ return [b, 0, 0];
+ case "m":
+ return [b, 1, 0, 0, 1, 0, 0];
+ case "r":
+ return 4 == a.length ? [b, 0, a[2], a[3]] : [b, 0];
+ case "s":
+ return 5 == a.length ? [b, 1, 1, a[3], a[4]] : 3 == a.length ? [b, 1, 1] : [b, 1];
+ }
+ }
+ function g(b, c, d) {
+ (b = b || new a.Matrix()),
+ (c = c || new a.Matrix()),
+ (b = a.parseTransformString(b.toTransformString()) || []),
+ (c = a.parseTransformString(c.toTransformString()) || []);
+ for (
+ var e, g, h, i, j = Math.max(b.length, c.length), k = [], n = [], o = 0;
+ j > o;
+ o++
+ ) {
+ if (
+ ((h = b[o] || f(c[o])),
+ (i = c[o] || f(h)),
+ h[0] != i[0] ||
+ ("r" == h[0].toLowerCase() && (h[2] != i[2] || h[3] != i[3])) ||
+ ("s" == h[0].toLowerCase() && (h[3] != i[3] || h[4] != i[4])))
+ ) {
+ (b = a._.transform2matrix(b, d())),
+ (c = a._.transform2matrix(c, d())),
+ (k = [["m", b.a, b.b, b.c, b.d, b.e, b.f]]),
+ (n = [["m", c.a, c.b, c.c, c.d, c.e, c.f]]);
+ break;
+ }
+ for (k[o] = [], n[o] = [], e = 0, g = Math.max(h.length, i.length); g > e; e++)
+ e in h && (k[o][e] = h[e]), e in i && (n[o][e] = i[e]);
+ }
+ return { from: m(k), to: m(n), f: l(k) };
+ }
+ function h(a) {
+ return a;
+ }
+ function i(a) {
+ return function (b) {
+ return +b.toFixed(3) + a;
+ };
+ }
+ function j(a) {
+ return a.join(" ");
+ }
+ function k(b) {
+ return a.rgb(b[0], b[1], b[2], b[3]);
+ }
+ function l(a) {
+ var b,
+ c,
+ d,
+ e,
+ f,
+ g,
+ h = 0,
+ i = [];
+ for (b = 0, c = a.length; c > b; b++) {
+ for (f = "[", g = ['"' + a[b][0] + '"'], d = 1, e = a[b].length; e > d; d++)
+ g[d] = "val[" + h++ + "]";
+ (f += g + "]"), (i[b] = f);
+ }
+ return Function("val", "return Snap.path.toString.call([" + i + "])");
+ }
+ function m(a) {
+ for (var b = [], c = 0, d = a.length; d > c; c++)
+ for (var e = 1, f = a[c].length; f > e; e++) b.push(a[c][e]);
+ return b;
+ }
+ function n(a) {
+ return isFinite(a);
+ }
+ function o(b, c) {
+ return a.is(b, "array") && a.is(c, "array") ? b.toString() == c.toString() : !1;
+ }
+ var p = {},
+ q = /[%a-z]+$/i,
+ r = String;
+ (p.stroke = p.fill = "colour"),
+ (c.prototype.equal = function (a, c) {
+ return b("snap.util.equal", this, a, c).firstDefined();
+ }),
+ b.on("snap.util.equal", function (b, c) {
+ var d,
+ e,
+ f = r(this.attr(b) || ""),
+ s = this;
+ if ("colour" == p[b])
+ return (
+ (d = a.color(f)),
+ (e = a.color(c)),
+ { from: [d.r, d.g, d.b, d.opacity], to: [e.r, e.g, e.b, e.opacity], f: k }
+ );
+ if ("viewBox" == b)
+ return (
+ (d = this.attr(b).vb.split(" ").map(Number)),
+ (e = c.split(" ").map(Number)),
+ { from: d, to: e, f: j }
+ );
+ if ("transform" == b || "gradientTransform" == b || "patternTransform" == b)
+ return (
+ "string" == typeof c && (c = r(c).replace(/\.{3}|\u2026/g, f)),
+ (f = this.matrix),
+ (c = a._.rgTransform.test(c)
+ ? a._.transform2matrix(c, this.getBBox())
+ : a._.transform2matrix(a._.svgTransform2string(c), this.getBBox())),
+ g(f, c, function () {
+ return s.getBBox(1);
+ })
+ );
+ if ("d" == b || "path" == b)
+ return (d = a.path.toCubic(f, c)), { from: m(d[0]), to: m(d[1]), f: l(d[0]) };
+ if ("points" == b)
+ return (
+ (d = r(f).split(a._.separator)),
+ (e = r(c).split(a._.separator)),
+ {
+ from: d,
+ to: e,
+ f: function (a) {
+ return a;
+ },
+ }
+ );
+ if (n(f) && n(c)) return { from: parseFloat(f), to: parseFloat(c), f: h };
+ var t = f.match(q),
+ u = r(c).match(q);
+ return t && o(t, u)
+ ? { from: parseFloat(f), to: parseFloat(c), f: i(t) }
+ : { from: this.asPX(b), to: this.asPX(b, c), f: h };
+ });
+ }),
+ d.plugin(function (a, c, d, e) {
+ for (
+ var f = c.prototype,
+ g = "hasOwnProperty",
+ h = ("createTouch" in e.doc),
+ i = [
+ "click",
+ "dblclick",
+ "mousedown",
+ "mousemove",
+ "mouseout",
+ "mouseover",
+ "mouseup",
+ "touchstart",
+ "touchmove",
+ "touchend",
+ "touchcancel",
+ ],
+ j = { mousedown: "touchstart", mousemove: "touchmove", mouseup: "touchend" },
+ k = function (a, b) {
+ var c = "y" == a ? "scrollTop" : "scrollLeft",
+ d = b && b.node ? b.node.ownerDocument : e.doc;
+ return d[(c in d.documentElement) ? "documentElement" : "body"][c];
+ },
+ l = function () {
+ return this.originalEvent.preventDefault();
+ },
+ m = function () {
+ return this.originalEvent.stopPropagation();
+ },
+ n = function (a, b, c, d) {
+ var e = h && j[b] ? j[b] : b,
+ f = function (e) {
+ var f = k("y", d),
+ i = k("x", d);
+ if (h && j[g](b))
+ for (var n = 0, o = e.targetTouches && e.targetTouches.length; o > n; n++)
+ if (e.targetTouches[n].target == a || a.contains(e.targetTouches[n].target)) {
+ var p = e;
+ (e = e.targetTouches[n]),
+ (e.originalEvent = p),
+ (e.preventDefault = l),
+ (e.stopPropagation = m);
+ break;
+ }
+ var q = e.clientX + i,
+ r = e.clientY + f;
+ return c.call(d, e, q, r);
+ };
+ return (
+ b !== e && a.addEventListener(b, f, !1),
+ a.addEventListener(e, f, !1),
+ function () {
+ return (
+ b !== e && a.removeEventListener(b, f, !1), a.removeEventListener(e, f, !1), !0
+ );
+ }
+ );
+ },
+ o = [],
+ p = function (a) {
+ for (
+ var c, d = a.clientX, e = a.clientY, f = k("y"), g = k("x"), i = o.length;
+ i--;
+
+ ) {
+ if (((c = o[i]), h)) {
+ for (var j, l = a.touches && a.touches.length; l--; )
+ if (
+ ((j = a.touches[l]),
+ j.identifier == c.el._drag.id || c.el.node.contains(j.target))
+ ) {
+ (d = j.clientX),
+ (e = j.clientY),
+ (a.originalEvent ? a.originalEvent : a).preventDefault();
+ break;
+ }
+ } else a.preventDefault();
+ var m = c.el.node;
+ m.nextSibling, m.parentNode, m.style.display;
+ (d += g),
+ (e += f),
+ b(
+ "snap.drag.move." + c.el.id,
+ c.move_scope || c.el,
+ d - c.el._drag.x,
+ e - c.el._drag.y,
+ d,
+ e,
+ a
+ );
+ }
+ },
+ q = function (c) {
+ a.unmousemove(p).unmouseup(q);
+ for (var d, e = o.length; e--; )
+ (d = o[e]),
+ (d.el._drag = {}),
+ b(
+ "snap.drag.end." + d.el.id,
+ d.end_scope || d.start_scope || d.move_scope || d.el,
+ c
+ ),
+ b.off("snap.drag.*." + d.el.id);
+ o = [];
+ },
+ r = i.length;
+ r--;
+
+ )
+ !(function (b) {
+ (a[b] = f[b] =
+ function (c, d) {
+ if (a.is(c, "function"))
+ (this.events = this.events || []),
+ this.events.push({
+ name: b,
+ f: c,
+ unbind: n(this.node || document, b, c, d || this),
+ });
+ else
+ for (var e = 0, f = this.events.length; f > e; e++)
+ if (this.events[e].name == b)
+ try {
+ this.events[e].f.call(this);
+ } catch (g) {}
+ return this;
+ }),
+ (a["un" + b] = f["un" + b] =
+ function (a) {
+ for (var c = this.events || [], d = c.length; d--; )
+ if (c[d].name == b && (c[d].f == a || !a))
+ return c[d].unbind(), c.splice(d, 1), !c.length && delete this.events, this;
+ return this;
+ });
+ })(i[r]);
+ (f.hover = function (a, b, c, d) {
+ return this.mouseover(a, c).mouseout(b, d || c);
+ }),
+ (f.unhover = function (a, b) {
+ return this.unmouseover(a).unmouseout(b);
+ });
+ var s = [];
+ (f.drag = function (c, d, e, f, g, h) {
+ function i(i, j, l) {
+ (i.originalEvent || i).preventDefault(),
+ (k._drag.x = j),
+ (k._drag.y = l),
+ (k._drag.id = i.identifier),
+ !o.length && a.mousemove(p).mouseup(q),
+ o.push({ el: k, move_scope: f, start_scope: g, end_scope: h }),
+ d && b.on("snap.drag.start." + k.id, d),
+ c && b.on("snap.drag.move." + k.id, c),
+ e && b.on("snap.drag.end." + k.id, e),
+ b("snap.drag.start." + k.id, g || f || k, j, l, i);
+ }
+ function j(a, c, d) {
+ b("snap.draginit." + k.id, k, a, c, d);
+ }
+ var k = this;
+ if (!arguments.length) {
+ var l;
+ return k.drag(
+ function (a, b) {
+ this.attr({ transform: l + (l ? "T" : "t") + [a, b] });
+ },
+ function () {
+ l = this.transform().local;
+ }
+ );
+ }
+ return (
+ b.on("snap.draginit." + k.id, i),
+ (k._drag = {}),
+ s.push({ el: k, start: i, init: j }),
+ k.mousedown(j),
+ k
+ );
+ }),
+ (f.undrag = function () {
+ for (var c = s.length; c--; )
+ s[c].el == this &&
+ (this.unmousedown(s[c].init),
+ s.splice(c, 1),
+ b.unbind("snap.drag.*." + this.id),
+ b.unbind("snap.draginit." + this.id));
+ return !s.length && a.unmousemove(p).unmouseup(q), this;
+ });
+ }),
+ d.plugin(function (a, c, d, e) {
+ var f = (c.prototype, d.prototype),
+ g = /^\s*url\((.+)\)/,
+ h = String,
+ i = a._.$;
+ (a.filter = {}),
+ (f.filter = function (b) {
+ var d = this;
+ "svg" != d.type && (d = d.paper);
+ var e = a.parse(h(b)),
+ f = a._.id(),
+ g = (d.node.offsetWidth, d.node.offsetHeight, i("filter"));
+ return (
+ i(g, { id: f, filterUnits: "userSpaceOnUse" }),
+ g.appendChild(e.node),
+ d.defs.appendChild(g),
+ new c(g)
+ );
+ }),
+ b.on("snap.util.getattr.filter", function () {
+ b.stop();
+ var c = i(this.node, "filter");
+ if (c) {
+ var d = h(c).match(g);
+ return d && a.select(d[1]);
+ }
+ }),
+ b.on("snap.util.attr.filter", function (d) {
+ if (d instanceof c && "filter" == d.type) {
+ b.stop();
+ var e = d.node.id;
+ e || (i(d.node, { id: d.id }), (e = d.id)), i(this.node, { filter: a.url(e) });
+ }
+ (d && "none" != d) || (b.stop(), this.node.removeAttribute("filter"));
+ }),
+ (a.filter.blur = function (b, c) {
+ null == b && (b = 2);
+ var d = null == c ? b : [b, c];
+        return a.format('<feGaussianBlur stdDeviation="{def}"/>', { def: d });
+ }),
+ (a.filter.blur.toString = function () {
+ return this();
+ }),
+ (a.filter.shadow = function (b, c, d, e, f) {
+ return (
+ null == f &&
+ (null == e ? ((f = d), (d = 4), (e = "#000")) : ((f = e), (e = d), (d = 4))),
+ null == d && (d = 4),
+ null == f && (f = 1),
+ null == b && ((b = 0), (c = 2)),
+ null == c && (c = b),
+ (e = a.color(e)),
+ a.format(
+ '',
+ { color: e, dx: b, dy: c, blur: d, opacity: f }
+ )
+ );
+ }),
+ (a.filter.shadow.toString = function () {
+ return this();
+ }),
+ (a.filter.grayscale = function (b) {
+ return (
+ null == b && (b = 1),
+ a.format(
+ '',
+ {
+ a: 0.2126 + 0.7874 * (1 - b),
+ b: 0.7152 - 0.7152 * (1 - b),
+ c: 0.0722 - 0.0722 * (1 - b),
+ d: 0.2126 - 0.2126 * (1 - b),
+ e: 0.7152 + 0.2848 * (1 - b),
+ f: 0.0722 - 0.0722 * (1 - b),
+ g: 0.2126 - 0.2126 * (1 - b),
+ h: 0.0722 + 0.9278 * (1 - b),
+ }
+ )
+ );
+ }),
+ (a.filter.grayscale.toString = function () {
+ return this();
+ }),
+ (a.filter.sepia = function (b) {
+ return (
+ null == b && (b = 1),
+ a.format(
+ '',
+ {
+ a: 0.393 + 0.607 * (1 - b),
+ b: 0.769 - 0.769 * (1 - b),
+ c: 0.189 - 0.189 * (1 - b),
+ d: 0.349 - 0.349 * (1 - b),
+ e: 0.686 + 0.314 * (1 - b),
+ f: 0.168 - 0.168 * (1 - b),
+ g: 0.272 - 0.272 * (1 - b),
+ h: 0.534 - 0.534 * (1 - b),
+ i: 0.131 + 0.869 * (1 - b),
+ }
+ )
+ );
+ }),
+ (a.filter.sepia.toString = function () {
+ return this();
+ }),
+ (a.filter.saturate = function (b) {
+ return (
+ null == b && (b = 1),
+          a.format('<feColorMatrix type="saturate" values="{amount}"/>', { amount: 1 - b })
+ );
+ }),
+ (a.filter.saturate.toString = function () {
+ return this();
+ }),
+ (a.filter.hueRotate = function (b) {
+ return (
+ (b = b || 0),
+          a.format('<feColorMatrix type="hueRotate" values="{angle}"/>', { angle: b })
+ );
+ }),
+ (a.filter.hueRotate.toString = function () {
+ return this();
+ }),
+ (a.filter.invert = function (b) {
+ return (
+ null == b && (b = 1),
+ a.format(
+            '<feComponentTransfer><feFuncR type="table" tableValues="{amount} {amount2}"/><feFuncG type="table" tableValues="{amount} {amount2}"/><feFuncB type="table" tableValues="{amount} {amount2}"/></feComponentTransfer>',
+ { amount: b, amount2: 1 - b }
+ )
+ );
+ }),
+ (a.filter.invert.toString = function () {
+ return this();
+ }),
+ (a.filter.brightness = function (b) {
+ return (
+ null == b && (b = 1),
+ a.format(
+            '<feComponentTransfer><feFuncR type="linear" slope="{amount}"/><feFuncG type="linear" slope="{amount}"/><feFuncB type="linear" slope="{amount}"/></feComponentTransfer>',
+ { amount: b }
+ )
+ );
+ }),
+ (a.filter.brightness.toString = function () {
+ return this();
+ }),
+ (a.filter.contrast = function (b) {
+ return (
+ null == b && (b = 1),
+ a.format(
+            '<feComponentTransfer><feFuncR type="linear" slope="{amount}" intercept="{amount2}"/><feFuncG type="linear" slope="{amount}" intercept="{amount2}"/><feFuncB type="linear" slope="{amount}" intercept="{amount2}"/></feComponentTransfer>',
+ { amount: b, amount2: 0.5 - b / 2 }
+ )
+ );
+ }),
+ (a.filter.contrast.toString = function () {
+ return this();
+ });
+ }),
+ d.plugin(function (a, b, c, d, e) {
+ var f = a._.box,
+ g = a.is,
+ h = /^[^a-z]*([tbmlrc])/i,
+ i = function () {
+ return "T" + this.dx + "," + this.dy;
+ };
+ (b.prototype.getAlign = function (a, b) {
+ null == b && g(a, "string") && ((b = a), (a = null)), (a = a || this.paper);
+ var c = a.getBBox ? a.getBBox() : f(a),
+ d = this.getBBox(),
+ e = {};
+ switch (((b = b && b.match(h)), (b = b ? b[1].toLowerCase() : "c"))) {
+ case "t":
+ (e.dx = 0), (e.dy = c.y - d.y);
+ break;
+ case "b":
+ (e.dx = 0), (e.dy = c.y2 - d.y2);
+ break;
+ case "m":
+ (e.dx = 0), (e.dy = c.cy - d.cy);
+ break;
+ case "l":
+ (e.dx = c.x - d.x), (e.dy = 0);
+ break;
+ case "r":
+ (e.dx = c.x2 - d.x2), (e.dy = 0);
+ break;
+ default:
+ (e.dx = c.cx - d.cx), (e.dy = 0);
+ }
+ return (e.toString = i), e;
+ }),
+ (b.prototype.align = function (a, b) {
+ return this.transform("..." + this.getAlign(a, b));
+ });
+ }),
+ d.plugin(function (b, c, d, e) {
+ function f(a) {
+ a = a.split(/(?=#)/);
+ var b = new String(a[5]);
+ return (
+ (b[50] = a[0]),
+ (b[100] = a[1]),
+ (b[200] = a[2]),
+ (b[300] = a[3]),
+ (b[400] = a[4]),
+ (b[500] = a[5]),
+ (b[600] = a[6]),
+ (b[700] = a[7]),
+ (b[800] = a[8]),
+ (b[900] = a[9]),
+ a[10] && ((b.A100 = a[10]), (b.A200 = a[11]), (b.A400 = a[12]), (b.A700 = a[13])),
+ b
+ );
+ }
+ var g =
+ "#ffebee#ffcdd2#ef9a9a#e57373#ef5350#f44336#e53935#d32f2f#c62828#b71c1c#ff8a80#ff5252#ff1744#d50000",
+ h =
+ "#FCE4EC#F8BBD0#F48FB1#F06292#EC407A#E91E63#D81B60#C2185B#AD1457#880E4F#FF80AB#FF4081#F50057#C51162",
+ i =
+ "#F3E5F5#E1BEE7#CE93D8#BA68C8#AB47BC#9C27B0#8E24AA#7B1FA2#6A1B9A#4A148C#EA80FC#E040FB#D500F9#AA00FF",
+ j =
+ "#EDE7F6#D1C4E9#B39DDB#9575CD#7E57C2#673AB7#5E35B1#512DA8#4527A0#311B92#B388FF#7C4DFF#651FFF#6200EA",
+ k =
+ "#E8EAF6#C5CAE9#9FA8DA#7986CB#5C6BC0#3F51B5#3949AB#303F9F#283593#1A237E#8C9EFF#536DFE#3D5AFE#304FFE",
+ l =
+ "#E3F2FD#BBDEFB#90CAF9#64B5F6#64B5F6#2196F3#1E88E5#1976D2#1565C0#0D47A1#82B1FF#448AFF#2979FF#2962FF",
+ m =
+ "#E1F5FE#B3E5FC#81D4FA#4FC3F7#29B6F6#03A9F4#039BE5#0288D1#0277BD#01579B#80D8FF#40C4FF#00B0FF#0091EA",
+ n =
+ "#E0F7FA#B2EBF2#80DEEA#4DD0E1#26C6DA#00BCD4#00ACC1#0097A7#00838F#006064#84FFFF#18FFFF#00E5FF#00B8D4",
+ o =
+ "#E0F2F1#B2DFDB#80CBC4#4DB6AC#26A69A#009688#00897B#00796B#00695C#004D40#A7FFEB#64FFDA#1DE9B6#00BFA5",
+ p =
+ "#E8F5E9#C8E6C9#A5D6A7#81C784#66BB6A#4CAF50#43A047#388E3C#2E7D32#1B5E20#B9F6CA#69F0AE#00E676#00C853",
+ q =
+ "#F1F8E9#DCEDC8#C5E1A5#AED581#9CCC65#8BC34A#7CB342#689F38#558B2F#33691E#CCFF90#B2FF59#76FF03#64DD17",
+ r =
+ "#F9FBE7#F0F4C3#E6EE9C#DCE775#D4E157#CDDC39#C0CA33#AFB42B#9E9D24#827717#F4FF81#EEFF41#C6FF00#AEEA00",
+ s =
+ "#FFFDE7#FFF9C4#FFF59D#FFF176#FFEE58#FFEB3B#FDD835#FBC02D#F9A825#F57F17#FFFF8D#FFFF00#FFEA00#FFD600",
+ t =
+ "#FFF8E1#FFECB3#FFE082#FFD54F#FFCA28#FFC107#FFB300#FFA000#FF8F00#FF6F00#FFE57F#FFD740#FFC400#FFAB00",
+ u =
+ "#FFF3E0#FFE0B2#FFCC80#FFB74D#FFA726#FF9800#FB8C00#F57C00#EF6C00#E65100#FFD180#FFAB40#FF9100#FF6D00",
+ v =
+ "#FBE9E7#FFCCBC#FFAB91#FF8A65#FF7043#FF5722#F4511E#E64A19#D84315#BF360C#FF9E80#FF6E40#FF3D00#DD2C00",
+ w = "#EFEBE9#D7CCC8#BCAAA4#A1887F#8D6E63#795548#6D4C41#5D4037#4E342E#3E2723",
+ x = "#FAFAFA#F5F5F5#EEEEEE#E0E0E0#BDBDBD#9E9E9E#757575#616161#424242#212121",
+ y = "#ECEFF1#CFD8DC#B0BEC5#90A4AE#78909C#607D8B#546E7A#455A64#37474F#263238";
+ (b.mui = {}),
+ (b.flat = {}),
+ (b.mui.red = f(g)),
+ (b.mui.pink = f(h)),
+ (b.mui.purple = f(i)),
+ (b.mui.deeppurple = f(j)),
+ (b.mui.indigo = f(k)),
+ (b.mui.blue = f(l)),
+ (b.mui.lightblue = f(m)),
+ (b.mui.cyan = f(n)),
+ (b.mui.teal = f(o)),
+ (b.mui.green = f(p)),
+ (b.mui.lightgreen = f(q)),
+ (b.mui.lime = f(r)),
+ (b.mui.yellow = f(s)),
+ (b.mui.amber = f(t)),
+ (b.mui.orange = f(u)),
+ (b.mui.deeporange = f(v)),
+ (b.mui.brown = f(w)),
+ (b.mui.grey = f(x)),
+ (b.mui.bluegrey = f(y)),
+ (b.flat.turquoise = "#1abc9c"),
+ (b.flat.greensea = "#16a085"),
+ (b.flat.sunflower = "#f1c40f"),
+ (b.flat.orange = "#f39c12"),
+ (b.flat.emerland = "#2ecc71"),
+ (b.flat.nephritis = "#27ae60"),
+ (b.flat.carrot = "#e67e22"),
+ (b.flat.pumpkin = "#d35400"),
+ (b.flat.peterriver = "#3498db"),
+ (b.flat.belizehole = "#2980b9"),
+ (b.flat.alizarin = "#e74c3c"),
+ (b.flat.pomegranate = "#c0392b"),
+ (b.flat.amethyst = "#9b59b6"),
+ (b.flat.wisteria = "#8e44ad"),
+ (b.flat.clouds = "#ecf0f1"),
+ (b.flat.silver = "#bdc3c7"),
+ (b.flat.wetasphalt = "#34495e"),
+ (b.flat.midnightblue = "#2c3e50"),
+ (b.flat.concrete = "#95a5a6"),
+ (b.flat.asbestos = "#7f8c8d"),
+ (b.importMUIColors = function () {
+ for (var c in b.mui) b.mui.hasOwnProperty(c) && (a[c] = b.mui[c]);
+ });
+ }),
+ d
+ );
+ });
diff --git a/docs/demos/js/webgazer/ridgeWorker.mjs b/docs/demos/js/webgazer/ridgeWorker.mjs
new file mode 100644
index 00000000..8e5f57c4
--- /dev/null
+++ b/docs/demos/js/webgazer/ridgeWorker.mjs
@@ -0,0 +1,136 @@
+"use strict";
+
+console.log("thread starting");
+
+// src/util.mjs and src/mat.mjs must be available next to this worker as ./worker_scripts/util.js and ./worker_scripts/mat.js
+importScripts("./worker_scripts/util.js", "./worker_scripts/mat.js"); // [20200708] Figure out how to make all of this wrap up neatly
+var ridgeParameter = Math.pow(10, -5);
+var resizeWidth = 10;
+var resizeHeight = 6;
+var dataWindow = 700;
+var trailDataWindow = 10;
+var trainInterval = 500;
+
+var screenXClicksArray = new self.webgazer.util.DataWindow(dataWindow);
+var screenYClicksArray = new self.webgazer.util.DataWindow(dataWindow);
+var eyeFeaturesClicks = new self.webgazer.util.DataWindow(dataWindow);
+var dataClicks = new self.webgazer.util.DataWindow(dataWindow);
+
+var screenXTrailArray = new self.webgazer.util.DataWindow(trailDataWindow);
+var screenYTrailArray = new self.webgazer.util.DataWindow(trailDataWindow);
+var eyeFeaturesTrail = new self.webgazer.util.DataWindow(trailDataWindow);
+var dataTrail = new self.webgazer.util.DataWindow(trailDataWindow);
+
+/**
+ * Performs ridge regression, according to the Weka code.
+ * @param {Array} y - corresponds to screen coordinates (either x or y) for each of n click events
+ * @param {Array.<Array.<Number>>} X - corresponds to gray pixel features (120 pixels for both eyes) for each of n clicks
+ * @param {Number} k - ridge parameter
+ * @return {Array} regression coefficients
+ */
+function ridge(y, X, k) {
+ var nc = X[0].length;
+ var m_Coefficients = new Array(nc);
+ var xt = self.webgazer.mat.transpose(X);
+ var solution = new Array();
+ var success = true;
+ do {
+ var ss = self.webgazer.mat.mult(xt, X);
+ // Set ridge regression adjustment
+ for (var i = 0; i < nc; i++) {
+ ss[i][i] = ss[i][i] + k;
+ }
+
+ // Carry out the regression
+ var bb = self.webgazer.mat.mult(xt, y);
+ for (var i = 0; i < nc; i++) {
+ m_Coefficients[i] = bb[i][0];
+ }
+ try {
+ var n = m_Coefficients.length !== 0 ? m_Coefficients.length / m_Coefficients.length : 0;
+ if (m_Coefficients.length * n !== m_Coefficients.length) {
+ console.log("Array length must be a multiple of m");
+ }
+ solution =
+ ss.length === ss[0].length
+ ? self.webgazer.mat.LUDecomposition(ss, bb)
+ : self.webgazer.mat.QRDecomposition(ss, bb);
+
+ for (var i = 0; i < nc; i++) {
+ m_Coefficients[i] = solution[i][0];
+ }
+ success = true;
+ } catch (ex) {
+ k *= 10;
+ console.log(ex);
+ success = false;
+ }
+ } while (!success);
+ return m_Coefficients;
+}
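+
+// Illustrative sketch (not executed; the numbers are made up, not webgazer data).
+// ridge() solves (X'X + kI)w = X'y, so with two hypothetical samples of three
+// features each it would be called like this:
+//
+//   var X = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]; // 2 samples x 3 features
+//   var y = [[100], [200]];                     // screen coordinate per sample
+//   var w = ridge(y, X, ridgeParameter);        // -> [w0, w1, w2]
+//
+// In this worker each row of X is the 120 gray-scale eye-patch pixels and y
+// holds the matching click coordinates, as assembled in retrain() below.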
+
+// TODO: still useful?
+/**
+ *
+ * @returns {Number}
+ */
+function getCurrentFixationIndex() {
+  var recentX = screenXTrailArray.get(0);
+  var recentY = screenYTrailArray.get(0);
+  for (var i = screenXTrailArray.length - 1; i >= 0; i--) {
+    var currX = screenXTrailArray.get(i);
+    var currY = screenYTrailArray.get(i);
+    var euclideanDistance = Math.sqrt(Math.pow(currX - recentX, 2) + Math.pow(currY - recentY, 2));
+    if (euclideanDistance > 72) {
+      return i + 1;
+    }
+  }
+  // No trail sample is more than 72px away from (recentX, recentY), so the
+  // whole trail is treated as a single fixation.
+  return 0;
+}
+
+/**
+ * Event handler; stores screen positions and eye features so the model can be trained
+ * @param {Event} event - the received event
+ */
+self.onmessage = function (event) {
+ var data = event.data;
+ var screenPos = data["screenPos"];
+ var eyes = data["eyes"];
+ var type = data["type"];
+ if (type === "click") {
+ self.screenXClicksArray.push([screenPos[0]]);
+ self.screenYClicksArray.push([screenPos[1]]);
+
+ self.eyeFeaturesClicks.push(eyes);
+ } else if (type === "move") {
+ self.screenXTrailArray.push([screenPos[0]]);
+ self.screenYTrailArray.push([screenPos[1]]);
+
+ self.eyeFeaturesTrail.push(eyes);
+ self.dataTrail.push({ eyes: eyes, screenPos: screenPos, type: type });
+ }
+ self.needsTraining = true;
+};
+
+/**
+ * Computes regression coefficients from the collected training data and posts them to the main thread
+ */
+function retrain() {
+ if (self.screenXClicksArray.length === 0) {
+ return;
+ }
+ if (!self.needsTraining) {
+ return;
+ }
+ var screenXArray = self.screenXClicksArray.data.concat(self.screenXTrailArray.data);
+ var screenYArray = self.screenYClicksArray.data.concat(self.screenYTrailArray.data);
+ var eyeFeatures = self.eyeFeaturesClicks.data.concat(self.eyeFeaturesTrail.data);
+
+ var coefficientsX = ridge(screenXArray, eyeFeatures, ridgeParameter);
+ var coefficientsY = ridge(screenYArray, eyeFeatures, ridgeParameter);
+ self.postMessage({ X: coefficientsX, Y: coefficientsY });
+ self.needsTraining = false;
+}
+
+setInterval(retrain, trainInterval);
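+
+// Minimal usage sketch (assumptions: this file is loaded as a classic worker and
+// ./worker_scripts/ resolves next to it). The main thread feeds samples and receives
+// fresh coefficients roughly every trainInterval milliseconds:
+//
+//   var worker = new Worker('ridgeWorker.mjs');
+//   worker.onmessage = function (e) { /* e.data.X and e.data.Y are coefficient arrays */ };
+//   worker.postMessage({ eyes: eyeFeatures, screenPos: [x, y], type: 'click' });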
diff --git a/docs/demos/js/webgazer/webgazer.js b/docs/demos/js/webgazer/webgazer.js
new file mode 100644
index 00000000..71835bef
--- /dev/null
+++ b/docs/demos/js/webgazer/webgazer.js
@@ -0,0 +1,99912 @@
+/*!
+ *
+ * WebGazer.js: Scalable Webcam EyeTracking Using User Interactions
+ * Copyright (c) 2016-2020, Brown HCI Group
+ * Licensed under GPLv3. Companies with a valuation of less than $1M can use WebGazer.js under LGPLv3.
+ *
+ */
+var webgazer = /******/ (function (modules) {
+ // webpackBootstrap
+ /******/ // The module cache
+ /******/ var installedModules = {};
+ /******/
+ /******/ // The require function
+ /******/ function __webpack_require__(moduleId) {
+ /******/
+ /******/ // Check if module is in cache
+ /******/ if (installedModules[moduleId]) {
+ /******/ return installedModules[moduleId].exports;
+ /******/
+ }
+ /******/ // Create a new module (and put it into the cache)
+ /******/ var module = (installedModules[moduleId] = {
+ /******/ i: moduleId,
+ /******/ l: false,
+ /******/ exports: {},
+ /******/
+ });
+ /******/
+ /******/ // Execute the module function
+ /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
+ /******/
+ /******/ // Flag the module as loaded
+ /******/ module.l = true;
+ /******/
+ /******/ // Return the exports of the module
+ /******/ return module.exports;
+ /******/
+ }
+ /******/
+ /******/
+ /******/ // expose the modules object (__webpack_modules__)
+ /******/ __webpack_require__.m = modules;
+ /******/
+ /******/ // expose the module cache
+ /******/ __webpack_require__.c = installedModules;
+ /******/
+ /******/ // define getter function for harmony exports
+ /******/ __webpack_require__.d = function (exports, name, getter) {
+ /******/ if (!__webpack_require__.o(exports, name)) {
+ /******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
+ /******/
+ }
+ /******/
+ };
+ /******/
+ /******/ // define __esModule on exports
+ /******/ __webpack_require__.r = function (exports) {
+ /******/ if (typeof Symbol !== "undefined" && Symbol.toStringTag) {
+ /******/ Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
+ /******/
+ }
+ /******/ Object.defineProperty(exports, "__esModule", { value: true });
+ /******/
+ };
+ /******/
+ /******/ // create a fake namespace object
+ /******/ // mode & 1: value is a module id, require it
+ /******/ // mode & 2: merge all properties of value into the ns
+ /******/ // mode & 4: return value when already ns object
+ /******/ // mode & 8|1: behave like require
+ /******/ __webpack_require__.t = function (value, mode) {
+ /******/ if (mode & 1) value = __webpack_require__(value);
+ /******/ if (mode & 8) return value;
+ /******/ if (mode & 4 && typeof value === "object" && value && value.__esModule) return value;
+ /******/ var ns = Object.create(null);
+ /******/ __webpack_require__.r(ns);
+ /******/ Object.defineProperty(ns, "default", { enumerable: true, value: value });
+ /******/ if (mode & 2 && typeof value != "string")
+ for (var key in value)
+ __webpack_require__.d(
+ ns,
+ key,
+ function (key) {
+ return value[key];
+ }.bind(null, key)
+ );
+ /******/ return ns;
+ /******/
+ };
+ /******/
+ /******/ // getDefaultExport function for compatibility with non-harmony modules
+ /******/ __webpack_require__.n = function (module) {
+ /******/ var getter =
+ module && module.__esModule
+ ? /******/ function getDefault() {
+ return module["default"];
+ }
+ : /******/ function getModuleExports() {
+ return module;
+ };
+ /******/ __webpack_require__.d(getter, "a", getter);
+ /******/ return getter;
+ /******/
+ };
+ /******/
+ /******/ // Object.prototype.hasOwnProperty.call
+ /******/ __webpack_require__.o = function (object, property) {
+ return Object.prototype.hasOwnProperty.call(object, property);
+ };
+ /******/
+ /******/ // __webpack_public_path__
+ /******/ __webpack_require__.p = "";
+ /******/
+ /******/
+ /******/ // Load entry module and return exports
+ /******/ return __webpack_require__((__webpack_require__.s = 90));
+ /******/
+})(
+ /************************************************************************/
+ /******/ [
+ /* 0 */
+ /***/ function (module, __webpack_exports__, __webpack_require__) {
+ "use strict";
+ // ESM COMPAT FLAG
+ __webpack_require__.r(__webpack_exports__);
+
+ // EXPORTS
+ __webpack_require__.d(__webpack_exports__, "AdadeltaOptimizer", function () {
+ return /* reexport */ adadelta_optimizer_AdadeltaOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "AdagradOptimizer", function () {
+ return /* reexport */ adagrad_optimizer_AdagradOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "AdamOptimizer", function () {
+ return /* reexport */ adam_optimizer_AdamOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "AdamaxOptimizer", function () {
+ return /* reexport */ adamax_optimizer_AdamaxOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "MomentumOptimizer", function () {
+ return /* reexport */ momentum_optimizer_MomentumOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "Optimizer", function () {
+ return /* reexport */ optimizer_Optimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "RMSPropOptimizer", function () {
+ return /* reexport */ rmsprop_optimizer_RMSPropOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "SGDOptimizer", function () {
+ return /* reexport */ sgd_optimizer_SGDOptimizer;
+ });
+ __webpack_require__.d(__webpack_exports__, "Tensor", function () {
+ return /* reexport */ dist_tensor["a" /* Tensor */];
+ });
+ __webpack_require__.d(__webpack_exports__, "TensorBuffer", function () {
+ return /* reexport */ dist_tensor["b" /* TensorBuffer */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Variable", function () {
+ return /* reexport */ dist_tensor["c" /* Variable */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Rank", function () {
+ return /* reexport */ dist_types["a" /* Rank */];
+ });
+ __webpack_require__.d(__webpack_exports__, "sumOutType", function () {
+ return /* reexport */ dist_types["b" /* sumOutType */];
+ });
+ __webpack_require__.d(__webpack_exports__, "upcastType", function () {
+ return /* reexport */ dist_types["c" /* upcastType */];
+ });
+ __webpack_require__.d(__webpack_exports__, "add", function () {
+ return /* reexport */ add;
+ });
+ __webpack_require__.d(__webpack_exports__, "addN", function () {
+ return /* reexport */ addN;
+ });
+ __webpack_require__.d(__webpack_exports__, "atan2", function () {
+ return /* reexport */ atan2;
+ });
+ __webpack_require__.d(__webpack_exports__, "avgPool", function () {
+ return /* reexport */ avgPool;
+ });
+ __webpack_require__.d(__webpack_exports__, "avgPool3d", function () {
+ return /* reexport */ avgPool3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "batchToSpaceND", function () {
+ return /* reexport */ batchToSpaceND;
+ });
+ __webpack_require__.d(__webpack_exports__, "batchNorm", function () {
+ return /* reexport */ batchNorm;
+ });
+ __webpack_require__.d(__webpack_exports__, "batchNorm2d", function () {
+ return /* reexport */ batchNorm2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "batchNorm3d", function () {
+ return /* reexport */ batchNorm3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "batchNorm4d", function () {
+ return /* reexport */ batchNorm4d;
+ });
+ __webpack_require__.d(__webpack_exports__, "broadcastTo", function () {
+ return /* reexport */ broadcastTo;
+ });
+ __webpack_require__.d(__webpack_exports__, "clone", function () {
+ return /* reexport */ clone;
+ });
+ __webpack_require__.d(__webpack_exports__, "complex", function () {
+ return /* reexport */ complex["a" /* complex */];
+ });
+ __webpack_require__.d(__webpack_exports__, "concat", function () {
+ return /* reexport */ concat;
+ });
+ __webpack_require__.d(__webpack_exports__, "concat1d", function () {
+ return /* reexport */ concat1d;
+ });
+ __webpack_require__.d(__webpack_exports__, "concat2d", function () {
+ return /* reexport */ concat2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "concat3d", function () {
+ return /* reexport */ concat3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "concat4d", function () {
+ return /* reexport */ concat4d;
+ });
+ __webpack_require__.d(__webpack_exports__, "conv1d", function () {
+ return /* reexport */ conv1d;
+ });
+ __webpack_require__.d(__webpack_exports__, "conv2d", function () {
+ return /* reexport */ conv2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "conv2dTranspose", function () {
+ return /* reexport */ conv2dTranspose;
+ });
+ __webpack_require__.d(__webpack_exports__, "conv3d", function () {
+ return /* reexport */ conv3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "conv3dTranspose", function () {
+ return /* reexport */ conv3dTranspose;
+ });
+ __webpack_require__.d(__webpack_exports__, "cumsum", function () {
+ return /* reexport */ cumsum;
+ });
+ __webpack_require__.d(__webpack_exports__, "depthToSpace", function () {
+ return /* reexport */ depthToSpace;
+ });
+ __webpack_require__.d(__webpack_exports__, "depthwiseConv2d", function () {
+ return /* reexport */ depthwiseConv2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "diag", function () {
+ return /* reexport */ diag;
+ });
+ __webpack_require__.d(__webpack_exports__, "div", function () {
+ return /* reexport */ div;
+ });
+ __webpack_require__.d(__webpack_exports__, "divNoNan", function () {
+ return /* reexport */ divNoNan;
+ });
+ __webpack_require__.d(__webpack_exports__, "dot", function () {
+ return /* reexport */ dot;
+ });
+ __webpack_require__.d(__webpack_exports__, "elu", function () {
+ return /* reexport */ elu;
+ });
+ __webpack_require__.d(__webpack_exports__, "equal", function () {
+ return /* reexport */ equal;
+ });
+ __webpack_require__.d(__webpack_exports__, "eye", function () {
+ return /* reexport */ eye;
+ });
+ __webpack_require__.d(__webpack_exports__, "fill", function () {
+ return /* reexport */ fill;
+ });
+ __webpack_require__.d(__webpack_exports__, "floorDiv", function () {
+ return /* reexport */ floorDiv;
+ });
+ __webpack_require__.d(__webpack_exports__, "greater", function () {
+ return /* reexport */ greater;
+ });
+ __webpack_require__.d(__webpack_exports__, "greaterEqual", function () {
+ return /* reexport */ greaterEqual;
+ });
+ __webpack_require__.d(__webpack_exports__, "imag", function () {
+ return /* reexport */ imag["a" /* imag */];
+ });
+ __webpack_require__.d(__webpack_exports__, "leakyRelu", function () {
+ return /* reexport */ leakyRelu;
+ });
+ __webpack_require__.d(__webpack_exports__, "less", function () {
+ return /* reexport */ less;
+ });
+ __webpack_require__.d(__webpack_exports__, "lessEqual", function () {
+ return /* reexport */ lessEqual;
+ });
+ __webpack_require__.d(__webpack_exports__, "localResponseNormalization", function () {
+ return /* reexport */ localResponseNormalization;
+ });
+ __webpack_require__.d(__webpack_exports__, "matMul", function () {
+ return /* reexport */ matMul;
+ });
+ __webpack_require__.d(__webpack_exports__, "max", function () {
+ return /* reexport */ max_max;
+ });
+ __webpack_require__.d(__webpack_exports__, "maxPool", function () {
+ return /* reexport */ maxPool;
+ });
+ __webpack_require__.d(__webpack_exports__, "maxPool3d", function () {
+ return /* reexport */ maxPool3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "maxPoolWithArgmax", function () {
+ return /* reexport */ maxPoolWithArgmax;
+ });
+ __webpack_require__.d(__webpack_exports__, "maximum", function () {
+ return /* reexport */ maximum;
+ });
+ __webpack_require__.d(__webpack_exports__, "minimum", function () {
+ return /* reexport */ minimum;
+ });
+ __webpack_require__.d(__webpack_exports__, "mod", function () {
+ return /* reexport */ mod;
+ });
+ __webpack_require__.d(__webpack_exports__, "mul", function () {
+ return /* reexport */ mul;
+ });
+ __webpack_require__.d(__webpack_exports__, "multinomial", function () {
+ return /* reexport */ multinomial;
+ });
+ __webpack_require__.d(__webpack_exports__, "notEqual", function () {
+ return /* reexport */ notEqual;
+ });
+ __webpack_require__.d(__webpack_exports__, "oneHot", function () {
+ return /* reexport */ oneHot;
+ });
+ __webpack_require__.d(__webpack_exports__, "outerProduct", function () {
+ return /* reexport */ outerProduct;
+ });
+ __webpack_require__.d(__webpack_exports__, "pad", function () {
+ return /* reexport */ pad_pad;
+ });
+ __webpack_require__.d(__webpack_exports__, "pad1d", function () {
+ return /* reexport */ pad1d;
+ });
+ __webpack_require__.d(__webpack_exports__, "pad2d", function () {
+ return /* reexport */ pad2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "pad3d", function () {
+ return /* reexport */ pad3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "pad4d", function () {
+ return /* reexport */ pad4d;
+ });
+ __webpack_require__.d(__webpack_exports__, "pool", function () {
+ return /* reexport */ pool;
+ });
+ __webpack_require__.d(__webpack_exports__, "pow", function () {
+ return /* reexport */ pow;
+ });
+ __webpack_require__.d(__webpack_exports__, "prelu", function () {
+ return /* reexport */ prelu;
+ });
+ __webpack_require__.d(__webpack_exports__, "rand", function () {
+ return /* reexport */ rand;
+ });
+ __webpack_require__.d(__webpack_exports__, "randomGamma", function () {
+ return /* reexport */ randomGamma;
+ });
+ __webpack_require__.d(__webpack_exports__, "randomNormal", function () {
+ return /* reexport */ randomNormal;
+ });
+ __webpack_require__.d(__webpack_exports__, "randomUniform", function () {
+ return /* reexport */ randomUniform;
+ });
+ __webpack_require__.d(__webpack_exports__, "real", function () {
+ return /* reexport */ real["a" /* real */];
+ });
+ __webpack_require__.d(__webpack_exports__, "relu", function () {
+ return /* reexport */ relu;
+ });
+ __webpack_require__.d(__webpack_exports__, "relu6", function () {
+ return /* reexport */ relu6;
+ });
+ __webpack_require__.d(__webpack_exports__, "selu", function () {
+ return /* reexport */ selu;
+ });
+ __webpack_require__.d(__webpack_exports__, "separableConv2d", function () {
+ return /* reexport */ separableConv2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "spaceToBatchND", function () {
+ return /* reexport */ spaceToBatchND;
+ });
+ __webpack_require__.d(__webpack_exports__, "split", function () {
+ return /* reexport */ split;
+ });
+ __webpack_require__.d(__webpack_exports__, "square", function () {
+ return /* reexport */ square;
+ });
+ __webpack_require__.d(__webpack_exports__, "squaredDifference", function () {
+ return /* reexport */ squaredDifference;
+ });
+ __webpack_require__.d(__webpack_exports__, "sub", function () {
+ return /* reexport */ sub;
+ });
+ __webpack_require__.d(__webpack_exports__, "tile", function () {
+ return /* reexport */ tile;
+ });
+ __webpack_require__.d(__webpack_exports__, "truncatedNormal", function () {
+ return /* reexport */ truncatedNormal;
+ });
+ __webpack_require__.d(__webpack_exports__, "booleanMaskAsync", function () {
+ return /* reexport */ booleanMaskAsync;
+ });
+ __webpack_require__.d(__webpack_exports__, "reverse", function () {
+ return /* reexport */ reverse_reverse;
+ });
+ __webpack_require__.d(__webpack_exports__, "reverse1d", function () {
+ return /* reexport */ reverse1d;
+ });
+ __webpack_require__.d(__webpack_exports__, "reverse2d", function () {
+ return /* reexport */ reverse2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "reverse3d", function () {
+ return /* reexport */ reverse3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "reverse4d", function () {
+ return /* reexport */ reverse4d;
+ });
+ __webpack_require__.d(__webpack_exports__, "slice", function () {
+ return /* reexport */ slice;
+ });
+ __webpack_require__.d(__webpack_exports__, "slice1d", function () {
+ return /* reexport */ slice1d;
+ });
+ __webpack_require__.d(__webpack_exports__, "slice2d", function () {
+ return /* reexport */ slice2d;
+ });
+ __webpack_require__.d(__webpack_exports__, "slice3d", function () {
+ return /* reexport */ slice3d;
+ });
+ __webpack_require__.d(__webpack_exports__, "slice4d", function () {
+ return /* reexport */ slice4d;
+ });
+ __webpack_require__.d(__webpack_exports__, "abs", function () {
+ return /* reexport */ abs;
+ });
+ __webpack_require__.d(__webpack_exports__, "acos", function () {
+ return /* reexport */ acos;
+ });
+ __webpack_require__.d(__webpack_exports__, "acosh", function () {
+ return /* reexport */ acosh;
+ });
+ __webpack_require__.d(__webpack_exports__, "asin", function () {
+ return /* reexport */ asin;
+ });
+ __webpack_require__.d(__webpack_exports__, "asinh", function () {
+ return /* reexport */ asinh;
+ });
+ __webpack_require__.d(__webpack_exports__, "atan", function () {
+ return /* reexport */ atan;
+ });
+ __webpack_require__.d(__webpack_exports__, "atanh", function () {
+ return /* reexport */ atanh;
+ });
+ __webpack_require__.d(__webpack_exports__, "ceil", function () {
+ return /* reexport */ ceil;
+ });
+ __webpack_require__.d(__webpack_exports__, "clipByValue", function () {
+ return /* reexport */ clipByValue;
+ });
+ __webpack_require__.d(__webpack_exports__, "cos", function () {
+ return /* reexport */ cos;
+ });
+ __webpack_require__.d(__webpack_exports__, "cosh", function () {
+ return /* reexport */ cosh;
+ });
+ __webpack_require__.d(__webpack_exports__, "erf", function () {
+ return /* reexport */ erf;
+ });
+ __webpack_require__.d(__webpack_exports__, "exp", function () {
+ return /* reexport */ unary_ops_exp;
+ });
+ __webpack_require__.d(__webpack_exports__, "expm1", function () {
+ return /* reexport */ expm1;
+ });
+ __webpack_require__.d(__webpack_exports__, "floor", function () {
+ return /* reexport */ floor;
+ });
+ __webpack_require__.d(__webpack_exports__, "log", function () {
+ return /* reexport */ log;
+ });
+ __webpack_require__.d(__webpack_exports__, "log1p", function () {
+ return /* reexport */ log1p;
+ });
+ __webpack_require__.d(__webpack_exports__, "logSigmoid", function () {
+ return /* reexport */ logSigmoid;
+ });
+ __webpack_require__.d(__webpack_exports__, "neg", function () {
+ return /* reexport */ neg;
+ });
+ __webpack_require__.d(__webpack_exports__, "reciprocal", function () {
+ return /* reexport */ reciprocal;
+ });
+ __webpack_require__.d(__webpack_exports__, "round", function () {
+ return /* reexport */ round;
+ });
+ __webpack_require__.d(__webpack_exports__, "rsqrt", function () {
+ return /* reexport */ rsqrt;
+ });
+ __webpack_require__.d(__webpack_exports__, "sigmoid", function () {
+ return /* reexport */ sigmoid;
+ });
+ __webpack_require__.d(__webpack_exports__, "sign", function () {
+ return /* reexport */ sign;
+ });
+ __webpack_require__.d(__webpack_exports__, "isNaN", function () {
+ return /* reexport */ unary_ops_isNaN;
+ });
+ __webpack_require__.d(__webpack_exports__, "isInf", function () {
+ return /* reexport */ isInf;
+ });
+ __webpack_require__.d(__webpack_exports__, "isFinite", function () {
+ return /* reexport */ unary_ops_isFinite;
+ });
+ __webpack_require__.d(__webpack_exports__, "sin", function () {
+ return /* reexport */ sin;
+ });
+ __webpack_require__.d(__webpack_exports__, "sinh", function () {
+ return /* reexport */ sinh;
+ });
+ __webpack_require__.d(__webpack_exports__, "softplus", function () {
+ return /* reexport */ softplus;
+ });
+ __webpack_require__.d(__webpack_exports__, "sqrt", function () {
+ return /* reexport */ sqrt;
+ });
+ __webpack_require__.d(__webpack_exports__, "step", function () {
+ return /* reexport */ unary_ops_step;
+ });
+ __webpack_require__.d(__webpack_exports__, "tan", function () {
+ return /* reexport */ tan;
+ });
+ __webpack_require__.d(__webpack_exports__, "tanh", function () {
+ return /* reexport */ tanh;
+ });
+ __webpack_require__.d(__webpack_exports__, "all", function () {
+ return /* reexport */ reduction_ops_all;
+ });
+ __webpack_require__.d(__webpack_exports__, "any", function () {
+ return /* reexport */ any;
+ });
+ __webpack_require__.d(__webpack_exports__, "argMax", function () {
+ return /* reexport */ argMax;
+ });
+ __webpack_require__.d(__webpack_exports__, "argMin", function () {
+ return /* reexport */ argMin;
+ });
+ __webpack_require__.d(__webpack_exports__, "logSumExp", function () {
+ return /* reexport */ logSumExp;
+ });
+ __webpack_require__.d(__webpack_exports__, "mean", function () {
+ return /* reexport */ reduction_ops_mean;
+ });
+ __webpack_require__.d(__webpack_exports__, "min", function () {
+ return /* reexport */ reduction_ops_min;
+ });
+ __webpack_require__.d(__webpack_exports__, "moments", function () {
+ return /* reexport */ moments;
+ });
+ __webpack_require__.d(__webpack_exports__, "sum", function () {
+ return /* reexport */ sum;
+ });
+ __webpack_require__.d(__webpack_exports__, "prod", function () {
+ return /* reexport */ reduction_ops_prod;
+ });
+ __webpack_require__.d(__webpack_exports__, "equalStrict", function () {
+ return /* reexport */ equalStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "greaterEqualStrict", function () {
+ return /* reexport */ greaterEqualStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "greaterStrict", function () {
+ return /* reexport */ greaterStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "lessEqualStrict", function () {
+ return /* reexport */ lessEqualStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "lessStrict", function () {
+ return /* reexport */ lessStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "notEqualStrict", function () {
+ return /* reexport */ notEqualStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "addStrict", function () {
+ return /* reexport */ addStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "divStrict", function () {
+ return /* reexport */ divStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "maximumStrict", function () {
+ return /* reexport */ maximumStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "minimumStrict", function () {
+ return /* reexport */ minimumStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "modStrict", function () {
+ return /* reexport */ modStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "mulStrict", function () {
+ return /* reexport */ mulStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "powStrict", function () {
+ return /* reexport */ powStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "squaredDifferenceStrict", function () {
+ return /* reexport */ squaredDifferenceStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "subStrict", function () {
+ return /* reexport */ subStrict;
+ });
+ __webpack_require__.d(__webpack_exports__, "logicalAnd", function () {
+ return /* reexport */ logicalAnd;
+ });
+ __webpack_require__.d(__webpack_exports__, "logicalNot", function () {
+ return /* reexport */ logicalNot;
+ });
+ __webpack_require__.d(__webpack_exports__, "logicalOr", function () {
+ return /* reexport */ logicalOr;
+ });
+ __webpack_require__.d(__webpack_exports__, "logicalXor", function () {
+ return /* reexport */ logicalXor;
+ });
+ __webpack_require__.d(__webpack_exports__, "where", function () {
+ return /* reexport */ where;
+ });
+ __webpack_require__.d(__webpack_exports__, "whereAsync", function () {
+ return /* reexport */ whereAsync;
+ });
+ __webpack_require__.d(__webpack_exports__, "buffer", function () {
+ return /* reexport */ array_ops_buffer;
+ });
+ __webpack_require__.d(__webpack_exports__, "print", function () {
+ return /* reexport */ print;
+ });
+ __webpack_require__.d(__webpack_exports__, "cast", function () {
+ return /* reexport */ cast;
+ });
+ __webpack_require__.d(__webpack_exports__, "expandDims", function () {
+ return /* reexport */ expandDims;
+ });
+ __webpack_require__.d(__webpack_exports__, "reshape", function () {
+ return /* reexport */ reshape;
+ });
+ __webpack_require__.d(__webpack_exports__, "squeeze", function () {
+ return /* reexport */ squeeze;
+ });
+ __webpack_require__.d(__webpack_exports__, "stack", function () {
+ return /* reexport */ stack;
+ });
+ __webpack_require__.d(__webpack_exports__, "unstack", function () {
+ return /* reexport */ unstack;
+ });
+ __webpack_require__.d(__webpack_exports__, "setdiff1dAsync", function () {
+ return /* reexport */ setdiff1dAsync;
+ });
+ __webpack_require__.d(__webpack_exports__, "linspace", function () {
+ return /* reexport */ tensor_ops["a" /* linspace */];
+ });
+ __webpack_require__.d(__webpack_exports__, "ones", function () {
+ return /* reexport */ tensor_ops["b" /* ones */];
+ });
+ __webpack_require__.d(__webpack_exports__, "range", function () {
+ return /* reexport */ tensor_ops["d" /* range */];
+ });
+ __webpack_require__.d(__webpack_exports__, "scalar", function () {
+ return /* reexport */ tensor_ops["e" /* scalar */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor", function () {
+ return /* reexport */ tensor_ops["f" /* tensor */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor1d", function () {
+ return /* reexport */ tensor_ops["g" /* tensor1d */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor2d", function () {
+ return /* reexport */ tensor_ops["h" /* tensor2d */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor3d", function () {
+ return /* reexport */ tensor_ops["i" /* tensor3d */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor4d", function () {
+ return /* reexport */ tensor_ops["j" /* tensor4d */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor5d", function () {
+ return /* reexport */ tensor_ops["k" /* tensor5d */];
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor6d", function () {
+ return /* reexport */ tensor_ops["l" /* tensor6d */];
+ });
+ __webpack_require__.d(__webpack_exports__, "variable", function () {
+ return /* reexport */ tensor_ops["m" /* variable */];
+ });
+ __webpack_require__.d(__webpack_exports__, "zeros", function () {
+ return /* reexport */ tensor_ops["n" /* zeros */];
+ });
+ __webpack_require__.d(__webpack_exports__, "onesLike", function () {
+ return /* reexport */ tensor_ops["c" /* onesLike */];
+ });
+ __webpack_require__.d(__webpack_exports__, "zerosLike", function () {
+ return /* reexport */ tensor_ops["o" /* zerosLike */];
+ });
+ __webpack_require__.d(__webpack_exports__, "transpose", function () {
+ return /* reexport */ transpose;
+ });
+ __webpack_require__.d(__webpack_exports__, "softmax", function () {
+ return /* reexport */ softmax;
+ });
+ __webpack_require__.d(__webpack_exports__, "logSoftmax", function () {
+ return /* reexport */ logSoftmax;
+ });
+ __webpack_require__.d(__webpack_exports__, "norm", function () {
+ return /* reexport */ norm_norm;
+ });
+ __webpack_require__.d(__webpack_exports__, "gather", function () {
+ return /* reexport */ gather;
+ });
+ __webpack_require__.d(__webpack_exports__, "unsortedSegmentSum", function () {
+ return /* reexport */ unsortedSegmentSum;
+ });
+ __webpack_require__.d(__webpack_exports__, "basicLSTMCell", function () {
+ return /* reexport */ basicLSTMCell;
+ });
+ __webpack_require__.d(__webpack_exports__, "multiRNNCell", function () {
+ return /* reexport */ multiRNNCell;
+ });
+ __webpack_require__.d(__webpack_exports__, "movingAverage", function () {
+ return /* reexport */ movingAverage;
+ });
+ __webpack_require__.d(__webpack_exports__, "stridedSlice", function () {
+ return /* reexport */ stridedSlice;
+ });
+ __webpack_require__.d(__webpack_exports__, "topk", function () {
+ return /* reexport */ topk;
+ });
+ __webpack_require__.d(__webpack_exports__, "scatterND", function () {
+ return /* reexport */ scatterND;
+ });
+ __webpack_require__.d(__webpack_exports__, "fft", function () {
+ return /* reexport */ fft;
+ });
+ __webpack_require__.d(__webpack_exports__, "ifft", function () {
+ return /* reexport */ ifft;
+ });
+ __webpack_require__.d(__webpack_exports__, "rfft", function () {
+ return /* reexport */ rfft;
+ });
+ __webpack_require__.d(__webpack_exports__, "irfft", function () {
+ return /* reexport */ irfft;
+ });
+ __webpack_require__.d(__webpack_exports__, "sparseToDense", function () {
+ return /* reexport */ sparseToDense;
+ });
+ __webpack_require__.d(__webpack_exports__, "gatherND", function () {
+ return /* reexport */ gatherND;
+ });
+ __webpack_require__.d(__webpack_exports__, "dropout", function () {
+ return /* reexport */ dropout;
+ });
+ __webpack_require__.d(__webpack_exports__, "hannWindow", function () {
+ return /* reexport */ hannWindow;
+ });
+ __webpack_require__.d(__webpack_exports__, "hammingWindow", function () {
+ return /* reexport */ hammingWindow;
+ });
+ __webpack_require__.d(__webpack_exports__, "frame", function () {
+ return /* reexport */ signal_ops_frame;
+ });
+ __webpack_require__.d(__webpack_exports__, "stft", function () {
+ return /* reexport */ stft;
+ });
+ __webpack_require__.d(__webpack_exports__, "inTopKAsync", function () {
+ return /* reexport */ inTopKAsync;
+ });
+ __webpack_require__.d(__webpack_exports__, "op", function () {
+ return /* reexport */ operation["a" /* op */];
+ });
+ __webpack_require__.d(__webpack_exports__, "image", function () {
+ return /* reexport */ image_ops_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "linalg", function () {
+ return /* reexport */ linalg_ops_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "losses", function () {
+ return /* reexport */ loss_ops_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "spectral", function () {
+ return /* reexport */ spectral_ops_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "fused", function () {
+ return /* reexport */ fused_ops_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "signal", function () {
+ return /* reexport */ signal_ops_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "Reduction", function () {
+ return /* reexport */ Reduction;
+ });
+ __webpack_require__.d(__webpack_exports__, "train", function () {
+ return /* reexport */ train;
+ });
+ __webpack_require__.d(__webpack_exports__, "enableProdMode", function () {
+ return /* reexport */ enableProdMode;
+ });
+ __webpack_require__.d(__webpack_exports__, "enableDebugMode", function () {
+ return /* reexport */ enableDebugMode;
+ });
+ __webpack_require__.d(__webpack_exports__, "disableDeprecationWarnings", function () {
+ return /* reexport */ disableDeprecationWarnings;
+ });
+ __webpack_require__.d(__webpack_exports__, "deprecationWarn", function () {
+ return /* reexport */ deprecationWarn;
+ });
+ __webpack_require__.d(__webpack_exports__, "disposeVariables", function () {
+ return /* reexport */ disposeVariables;
+ });
+ __webpack_require__.d(__webpack_exports__, "engine", function () {
+ return /* reexport */ globals_engine;
+ });
+ __webpack_require__.d(__webpack_exports__, "memory", function () {
+ return /* reexport */ memory;
+ });
+ __webpack_require__.d(__webpack_exports__, "profile", function () {
+ return /* reexport */ profile;
+ });
+ __webpack_require__.d(__webpack_exports__, "tidy", function () {
+ return /* reexport */ tidy;
+ });
+ __webpack_require__.d(__webpack_exports__, "dispose", function () {
+ return /* reexport */ dispose;
+ });
+ __webpack_require__.d(__webpack_exports__, "keep", function () {
+ return /* reexport */ keep;
+ });
+ __webpack_require__.d(__webpack_exports__, "time", function () {
+ return /* reexport */ time;
+ });
+ __webpack_require__.d(__webpack_exports__, "setBackend", function () {
+ return /* reexport */ setBackend;
+ });
+ __webpack_require__.d(__webpack_exports__, "ready", function () {
+ return /* reexport */ ready;
+ });
+ __webpack_require__.d(__webpack_exports__, "getBackend", function () {
+ return /* reexport */ getBackend;
+ });
+ __webpack_require__.d(__webpack_exports__, "removeBackend", function () {
+ return /* reexport */ removeBackend;
+ });
+ __webpack_require__.d(__webpack_exports__, "findBackend", function () {
+ return /* reexport */ findBackend;
+ });
+ __webpack_require__.d(__webpack_exports__, "findBackendFactory", function () {
+ return /* reexport */ findBackendFactory;
+ });
+ __webpack_require__.d(__webpack_exports__, "registerBackend", function () {
+ return /* reexport */ registerBackend;
+ });
+ __webpack_require__.d(__webpack_exports__, "backend", function () {
+ return /* reexport */ globals_backend;
+ });
+ __webpack_require__.d(__webpack_exports__, "setPlatform", function () {
+ return /* reexport */ setPlatform;
+ });
+ __webpack_require__.d(__webpack_exports__, "getKernel", function () {
+ return /* reexport */ kernel_registry["b" /* getKernel */];
+ });
+ __webpack_require__.d(__webpack_exports__, "getGradient", function () {
+ return /* reexport */ kernel_registry["a" /* getGradient */];
+ });
+ __webpack_require__.d(__webpack_exports__, "getKernelsForBackend", function () {
+ return /* reexport */ kernel_registry["c" /* getKernelsForBackend */];
+ });
+ __webpack_require__.d(__webpack_exports__, "registerKernel", function () {
+ return /* reexport */ kernel_registry["e" /* registerKernel */];
+ });
+ __webpack_require__.d(__webpack_exports__, "registerGradient", function () {
+ return /* reexport */ kernel_registry["d" /* registerGradient */];
+ });
+ __webpack_require__.d(__webpack_exports__, "unregisterKernel", function () {
+ return /* reexport */ kernel_registry["g" /* unregisterKernel */];
+ });
+ __webpack_require__.d(__webpack_exports__, "unregisterGradient", function () {
+ return /* reexport */ kernel_registry["f" /* unregisterGradient */];
+ });
+ __webpack_require__.d(__webpack_exports__, "customGrad", function () {
+ return /* reexport */ customGrad;
+ });
+ __webpack_require__.d(__webpack_exports__, "grad", function () {
+ return /* reexport */ gradients_grad;
+ });
+ __webpack_require__.d(__webpack_exports__, "grads", function () {
+ return /* reexport */ gradients_grads;
+ });
+ __webpack_require__.d(__webpack_exports__, "valueAndGrad", function () {
+ return /* reexport */ valueAndGrad;
+ });
+ __webpack_require__.d(__webpack_exports__, "valueAndGrads", function () {
+ return /* reexport */ valueAndGrads;
+ });
+ __webpack_require__.d(__webpack_exports__, "variableGrads", function () {
+ return /* reexport */ variableGrads;
+ });
+ __webpack_require__.d(__webpack_exports__, "Environment", function () {
+ return /* reexport */ environment["b" /* Environment */];
+ });
+ __webpack_require__.d(__webpack_exports__, "env", function () {
+ return /* reexport */ environment["c" /* env */];
+ });
+ __webpack_require__.d(__webpack_exports__, "ENV", function () {
+ return /* reexport */ environment["a" /* ENV */];
+ });
+ __webpack_require__.d(__webpack_exports__, "version_core", function () {
+ return /* reexport */ version;
+ });
+ __webpack_require__.d(__webpack_exports__, "nextFrame", function () {
+ return /* reexport */ browser_util["a" /* nextFrame */];
+ });
+ __webpack_require__.d(__webpack_exports__, "browser", function () {
+ return /* reexport */ browser_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "io", function () {
+ return /* reexport */ io_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "math", function () {
+ return /* reexport */ math_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "serialization", function () {
+ return /* reexport */ serialization_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "test_util", function () {
+ return /* reexport */ test_util_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "util", function () {
+ return /* reexport */ util;
+ });
+ __webpack_require__.d(__webpack_exports__, "backend_util", function () {
+ return /* reexport */ backend_util_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "tensor_util", function () {
+ return /* reexport */ tensor_util;
+ });
+ __webpack_require__.d(__webpack_exports__, "slice_util", function () {
+ return /* reexport */ slice_util_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "gather_util", function () {
+ return /* reexport */ gather_nd_util_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "scatter_util", function () {
+ return /* reexport */ scatter_nd_util_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "device_util", function () {
+ return /* reexport */ device_util;
+ });
+ __webpack_require__.d(__webpack_exports__, "kernel_impls", function () {
+ return /* reexport */ kernel_impls_namespaceObject;
+ });
+ __webpack_require__.d(__webpack_exports__, "KernelBackend", function () {
+ return /* reexport */ KernelBackend;
+ });
+ __webpack_require__.d(__webpack_exports__, "DataStorage", function () {
+ return /* reexport */ DataStorage;
+ });
+ __webpack_require__.d(__webpack_exports__, "Add", function () {
+ return /* reexport */ kernel_names["a" /* Add */];
+ });
+ __webpack_require__.d(__webpack_exports__, "AddN", function () {
+ return /* reexport */ kernel_names["b" /* AddN */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Atan2", function () {
+ return /* reexport */ kernel_names["c" /* Atan2 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "AvgPool", function () {
+ return /* reexport */ kernel_names["d" /* AvgPool */];
+ });
+ __webpack_require__.d(__webpack_exports__, "AvgPoolBackprop", function () {
+ return /* reexport */ kernel_names["g" /* AvgPoolBackprop */];
+ });
+ __webpack_require__.d(__webpack_exports__, "AvgPool3D", function () {
+ return /* reexport */ kernel_names["e" /* AvgPool3D */];
+ });
+ __webpack_require__.d(__webpack_exports__, "AvgPool3DBackprop", function () {
+ return /* reexport */ kernel_names["f" /* AvgPool3DBackprop */];
+ });
+ __webpack_require__.d(__webpack_exports__, "BatchMatMul", function () {
+ return /* reexport */ kernel_names["h" /* BatchMatMul */];
+ });
+ __webpack_require__.d(__webpack_exports__, "BatchToSpaceND", function () {
+ return /* reexport */ kernel_names["i" /* BatchToSpaceND */];
+ });
+ __webpack_require__.d(__webpack_exports__, "BroadcastTo", function () {
+ return /* reexport */ kernel_names["j" /* BroadcastTo */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Complex", function () {
+ return /* reexport */ kernel_names["k" /* Complex */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Concat", function () {
+ return /* reexport */ kernel_names["l" /* Concat */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Conv2D", function () {
+ return /* reexport */ kernel_names["m" /* Conv2D */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Conv2DBackpropFilter", function () {
+ return /* reexport */ kernel_names["n" /* Conv2DBackpropFilter */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Conv2DBackpropInput", function () {
+ return /* reexport */ kernel_names["o" /* Conv2DBackpropInput */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Conv3D", function () {
+ return /* reexport */ kernel_names["p" /* Conv3D */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Conv3DBackpropFilterV2", function () {
+ return /* reexport */ kernel_names["q" /* Conv3DBackpropFilterV2 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Conv3DBackpropInputV2", function () {
+ return /* reexport */ kernel_names["r" /* Conv3DBackpropInputV2 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Cumsum", function () {
+ return /* reexport */ kernel_names["s" /* Cumsum */];
+ });
+ __webpack_require__.d(__webpack_exports__, "DepthToSpace", function () {
+ return /* reexport */ kernel_names["t" /* DepthToSpace */];
+ });
+ __webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNative", function () {
+ return /* reexport */ kernel_names["u" /* DepthwiseConv2dNative */];
+ });
+ __webpack_require__.d(
+ __webpack_exports__,
+ "DepthwiseConv2dNativeBackpropFilter",
+ function () {
+ return /* reexport */ kernel_names["v" /* DepthwiseConv2dNativeBackpropFilter */];
+ }
+ );
+ __webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNativeBackpropInput", function () {
+ return /* reexport */ kernel_names["w" /* DepthwiseConv2dNativeBackpropInput */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Diag", function () {
+ return /* reexport */ kernel_names["x" /* Diag */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Div", function () {
+ return /* reexport */ kernel_names["y" /* Div */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Elu", function () {
+ return /* reexport */ kernel_names["z" /* Elu */];
+ });
+ __webpack_require__.d(__webpack_exports__, "EluGrad", function () {
+ return /* reexport */ kernel_names["A" /* EluGrad */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Equal", function () {
+ return /* reexport */ kernel_names["B" /* Equal */];
+ });
+ __webpack_require__.d(__webpack_exports__, "FloorDiv", function () {
+ return /* reexport */ kernel_names["D" /* FloorDiv */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Fill", function () {
+ return /* reexport */ kernel_names["C" /* Fill */];
+ });
+ __webpack_require__.d(__webpack_exports__, "FusedBatchNorm", function () {
+ return /* reexport */ kernel_names["F" /* FusedBatchNorm */];
+ });
+ __webpack_require__.d(__webpack_exports__, "GatherNd", function () {
+ return /* reexport */ kernel_names["G" /* GatherNd */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Greater", function () {
+ return /* reexport */ kernel_names["H" /* Greater */];
+ });
+ __webpack_require__.d(__webpack_exports__, "GreaterEqual", function () {
+ return /* reexport */ kernel_names["I" /* GreaterEqual */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Identity", function () {
+ return /* reexport */ kernel_names["J" /* Identity */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Imag", function () {
+ return /* reexport */ kernel_names["K" /* Imag */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Less", function () {
+ return /* reexport */ kernel_names["N" /* Less */];
+ });
+ __webpack_require__.d(__webpack_exports__, "LessEqual", function () {
+ return /* reexport */ kernel_names["O" /* LessEqual */];
+ });
+ __webpack_require__.d(__webpack_exports__, "LRN", function () {
+ return /* reexport */ kernel_names["L" /* LRN */];
+ });
+ __webpack_require__.d(__webpack_exports__, "LRNBackprop", function () {
+ return /* reexport */ kernel_names["M" /* LRNBackprop */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Max", function () {
+ return /* reexport */ kernel_names["P" /* Max */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Maximum", function () {
+ return /* reexport */ kernel_names["V" /* Maximum */];
+ });
+ __webpack_require__.d(__webpack_exports__, "MaxPool", function () {
+ return /* reexport */ kernel_names["Q" /* MaxPool */];
+ });
+ __webpack_require__.d(__webpack_exports__, "MaxPoolBackprop", function () {
+ return /* reexport */ kernel_names["T" /* MaxPoolBackprop */];
+ });
+ __webpack_require__.d(__webpack_exports__, "MaxPool3D", function () {
+ return /* reexport */ kernel_names["R" /* MaxPool3D */];
+ });
+ __webpack_require__.d(__webpack_exports__, "MaxPool3DBackprop", function () {
+ return /* reexport */ kernel_names["S" /* MaxPool3DBackprop */];
+ });
+ __webpack_require__.d(__webpack_exports__, "MaxPoolWithArgmax", function () {
+ return /* reexport */ kernel_names["U" /* MaxPoolWithArgmax */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Minimum", function () {
+ return /* reexport */ kernel_names["W" /* Minimum */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Mod", function () {
+ return /* reexport */ kernel_names["X" /* Mod */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Multiply", function () {
+ return /* reexport */ kernel_names["Y" /* Multiply */];
+ });
+ __webpack_require__.d(__webpack_exports__, "NotEqual", function () {
+ return /* reexport */ kernel_names["bb" /* NotEqual */];
+ });
+ __webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV3", function () {
+ return /* reexport */ kernel_names["Z" /* NonMaxSuppressionV3 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV5", function () {
+ return /* reexport */ kernel_names["ab" /* NonMaxSuppressionV5 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "OneHot", function () {
+ return /* reexport */ kernel_names["cb" /* OneHot */];
+ });
+ __webpack_require__.d(__webpack_exports__, "PadV2", function () {
+ return /* reexport */ kernel_names["db" /* PadV2 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Pool", function () {
+ return /* reexport */ kernel_names["eb" /* Pool */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Pow", function () {
+ return /* reexport */ kernel_names["fb" /* Pow */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Prelu", function () {
+ return /* reexport */ kernel_names["gb" /* Prelu */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Real", function () {
+ return /* reexport */ kernel_names["hb" /* Real */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Relu", function () {
+ return /* reexport */ kernel_names["ib" /* Relu */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Relu6", function () {
+ return /* reexport */ kernel_names["jb" /* Relu6 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "SelectV2", function () {
+ return /* reexport */ kernel_names["kb" /* SelectV2 */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Selu", function () {
+ return /* reexport */ kernel_names["lb" /* Selu */];
+ });
+ __webpack_require__.d(__webpack_exports__, "SpaceToBatchND", function () {
+ return /* reexport */ kernel_names["mb" /* SpaceToBatchND */];
+ });
+ __webpack_require__.d(__webpack_exports__, "SplitV", function () {
+ return /* reexport */ kernel_names["nb" /* SplitV */];
+ });
+ __webpack_require__.d(__webpack_exports__, "SquaredDifference", function () {
+ return /* reexport */ kernel_names["pb" /* SquaredDifference */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Square", function () {
+ return /* reexport */ kernel_names["ob" /* Square */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Sub", function () {
+ return /* reexport */ kernel_names["qb" /* Sub */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Tile", function () {
+ return /* reexport */ kernel_names["rb" /* Tile */];
+ });
+ __webpack_require__.d(__webpack_exports__, "Transpose", function () {
+ return /* reexport */ kernel_names["sb" /* Transpose */];
+ });
+ __webpack_require__.d(__webpack_exports__, "FromPixels", function () {
+ return /* reexport */ kernel_names["E" /* FromPixels */];
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js
+ var slice_util_namespaceObject = {};
+ __webpack_require__.r(slice_util_namespaceObject);
+ __webpack_require__.d(slice_util_namespaceObject, "assertParamsValid", function () {
+ return assertParamsValid;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "maskToAxes", function () {
+ return maskToAxes;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "computeOutShape", function () {
+ return slice_util_computeOutShape;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "stridesWithElidedDims", function () {
+ return stridesWithElidedDims;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "startIndicesWithElidedDims", function () {
+ return startIndicesWithElidedDims;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "stopIndicesWithElidedDims", function () {
+ return stopIndicesWithElidedDims;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "stridesForAxis", function () {
+ return stridesForAxis;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "startForAxis", function () {
+ return startForAxis;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "stopForAxis", function () {
+ return stopForAxis;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "isSliceContinous", function () {
+ return isSliceContinous;
+ });
+ __webpack_require__.d(slice_util_namespaceObject, "computeFlatOffset", function () {
+ return computeFlatOffset;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/io/io.js
+ var io_namespaceObject = {};
+ __webpack_require__.r(io_namespaceObject);
+ __webpack_require__.d(io_namespaceObject, "copyModel", function () {
+ return copyModel;
+ });
+ __webpack_require__.d(io_namespaceObject, "listModels", function () {
+ return listModels;
+ });
+ __webpack_require__.d(io_namespaceObject, "moveModel", function () {
+ return moveModel;
+ });
+ __webpack_require__.d(io_namespaceObject, "removeModel", function () {
+ return removeModel;
+ });
+ __webpack_require__.d(io_namespaceObject, "browserFiles", function () {
+ return browserFiles;
+ });
+ __webpack_require__.d(io_namespaceObject, "browserHTTPRequest", function () {
+ return browserHTTPRequest;
+ });
+ __webpack_require__.d(io_namespaceObject, "concatenateArrayBuffers", function () {
+ return io_utils["d" /* concatenateArrayBuffers */];
+ });
+ __webpack_require__.d(io_namespaceObject, "decodeWeights", function () {
+ return io_utils["e" /* decodeWeights */];
+ });
+ __webpack_require__.d(io_namespaceObject, "encodeWeights", function () {
+ return io_utils["f" /* encodeWeights */];
+ });
+ __webpack_require__.d(io_namespaceObject, "fromMemory", function () {
+ return fromMemory;
+ });
+ __webpack_require__.d(io_namespaceObject, "getLoadHandlers", function () {
+ return getLoadHandlers;
+ });
+ __webpack_require__.d(io_namespaceObject, "getModelArtifactsInfoForJSON", function () {
+ return io_utils["g" /* getModelArtifactsInfoForJSON */];
+ });
+ __webpack_require__.d(io_namespaceObject, "getSaveHandlers", function () {
+ return getSaveHandlers;
+ });
+ __webpack_require__.d(io_namespaceObject, "http", function () {
+ return http;
+ });
+ __webpack_require__.d(io_namespaceObject, "isHTTPScheme", function () {
+ return isHTTPScheme;
+ });
+ __webpack_require__.d(io_namespaceObject, "loadWeights", function () {
+ return loadWeights;
+ });
+ __webpack_require__.d(io_namespaceObject, "registerLoadRouter", function () {
+ return registerLoadRouter;
+ });
+ __webpack_require__.d(io_namespaceObject, "registerSaveRouter", function () {
+ return registerSaveRouter;
+ });
+ __webpack_require__.d(io_namespaceObject, "weightsLoaderFactory", function () {
+ return weightsLoaderFactory;
+ });
+ __webpack_require__.d(io_namespaceObject, "withSaveHandler", function () {
+ return withSaveHandler;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/math.js
+ var math_namespaceObject = {};
+ __webpack_require__.r(math_namespaceObject);
+ __webpack_require__.d(math_namespaceObject, "confusionMatrix", function () {
+ return confusionMatrix;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/browser.js
+ var browser_namespaceObject = {};
+ __webpack_require__.r(browser_namespaceObject);
+ __webpack_require__.d(browser_namespaceObject, "toPixels", function () {
+ return toPixels;
+ });
+ __webpack_require__.d(browser_namespaceObject, "fromPixels", function () {
+ return fromPixels;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js
+ var gather_nd_util_namespaceObject = {};
+ __webpack_require__.r(gather_nd_util_namespaceObject);
+ __webpack_require__.d(gather_nd_util_namespaceObject, "prepareAndValidate", function () {
+ return prepareAndValidate;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js
+ var scatter_nd_util_namespaceObject = {};
+ __webpack_require__.r(scatter_nd_util_namespaceObject);
+ __webpack_require__.d(scatter_nd_util_namespaceObject, "validateUpdateShape", function () {
+ return validateUpdateShape;
+ });
+ __webpack_require__.d(scatter_nd_util_namespaceObject, "validateInput", function () {
+ return validateInput;
+ });
+ __webpack_require__.d(scatter_nd_util_namespaceObject, "calculateShapes", function () {
+ return calculateShapes;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/serialization.js
+ var serialization_namespaceObject = {};
+ __webpack_require__.r(serialization_namespaceObject);
+ __webpack_require__.d(serialization_namespaceObject, "Serializable", function () {
+ return Serializable;
+ });
+ __webpack_require__.d(serialization_namespaceObject, "SerializationMap", function () {
+ return SerializationMap;
+ });
+ __webpack_require__.d(serialization_namespaceObject, "registerClass", function () {
+ return registerClass;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/test_util.js
+ var test_util_namespaceObject = {};
+ __webpack_require__.r(test_util_namespaceObject);
+ __webpack_require__.d(test_util_namespaceObject, "TEST_EPSILON_FLOAT16", function () {
+ return TEST_EPSILON_FLOAT16;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "expectArraysClose", function () {
+ return expectArraysClose;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "testEpsilon", function () {
+ return testEpsilon;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "expectPromiseToFail", function () {
+ return expectPromiseToFail;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "expectArraysEqual", function () {
+ return expectArraysEqual;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "expectNumbersClose", function () {
+ return expectNumbersClose;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "expectValuesInRange", function () {
+ return expectValuesInRange;
+ });
+ __webpack_require__.d(test_util_namespaceObject, "expectArrayBuffersEqual", function () {
+ return expectArrayBuffersEqual;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js
+ var segment_util_namespaceObject = {};
+ __webpack_require__.r(segment_util_namespaceObject);
+ __webpack_require__.d(
+ segment_util_namespaceObject,
+ "segOpComputeOptimalWindowSize",
+ function () {
+ return segOpComputeOptimalWindowSize;
+ }
+ );
+ __webpack_require__.d(segment_util_namespaceObject, "computeOutShape", function () {
+ return segment_util_computeOutShape;
+ });
+ __webpack_require__.d(segment_util_namespaceObject, "collectGatherOpShapeInfo", function () {
+ return collectGatherOpShapeInfo;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/spectral_ops.js
+ var spectral_ops_namespaceObject = {};
+ __webpack_require__.r(spectral_ops_namespaceObject);
+ __webpack_require__.d(spectral_ops_namespaceObject, "fft", function () {
+ return fft;
+ });
+ __webpack_require__.d(spectral_ops_namespaceObject, "ifft", function () {
+ return ifft;
+ });
+ __webpack_require__.d(spectral_ops_namespaceObject, "rfft", function () {
+ return rfft;
+ });
+ __webpack_require__.d(spectral_ops_namespaceObject, "irfft", function () {
+ return irfft;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops.js
+ var signal_ops_namespaceObject = {};
+ __webpack_require__.r(signal_ops_namespaceObject);
+ __webpack_require__.d(signal_ops_namespaceObject, "hannWindow", function () {
+ return hannWindow;
+ });
+ __webpack_require__.d(signal_ops_namespaceObject, "hammingWindow", function () {
+ return hammingWindow;
+ });
+ __webpack_require__.d(signal_ops_namespaceObject, "frame", function () {
+ return signal_ops_frame;
+ });
+ __webpack_require__.d(signal_ops_namespaceObject, "stft", function () {
+ return stft;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops.js
+ var loss_ops_namespaceObject = {};
+ __webpack_require__.r(loss_ops_namespaceObject);
+ __webpack_require__.d(loss_ops_namespaceObject, "Reduction", function () {
+ return Reduction;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "absoluteDifference", function () {
+ return absoluteDifference;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "computeWeightedLoss", function () {
+ return computeWeightedLoss;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "cosineDistance", function () {
+ return cosineDistance;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "hingeLoss", function () {
+ return hingeLoss;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "huberLoss", function () {
+ return huberLoss;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "logLoss", function () {
+ return logLoss;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "meanSquaredError", function () {
+ return meanSquaredError;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "sigmoidCrossEntropy", function () {
+ return sigmoidCrossEntropy;
+ });
+ __webpack_require__.d(loss_ops_namespaceObject, "softmaxCrossEntropy", function () {
+ return softmaxCrossEntropy;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/linalg_ops.js
+ var linalg_ops_namespaceObject = {};
+ __webpack_require__.r(linalg_ops_namespaceObject);
+ __webpack_require__.d(linalg_ops_namespaceObject, "bandPart", function () {
+ return bandPart;
+ });
+ __webpack_require__.d(linalg_ops_namespaceObject, "gramSchmidt", function () {
+ return gramSchmidt;
+ });
+ __webpack_require__.d(linalg_ops_namespaceObject, "qr", function () {
+ return qr;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/image_ops.js
+ var image_ops_namespaceObject = {};
+ __webpack_require__.r(image_ops_namespaceObject);
+ __webpack_require__.d(image_ops_namespaceObject, "nonMaxSuppression", function () {
+ return nonMaxSuppression;
+ });
+ __webpack_require__.d(image_ops_namespaceObject, "resizeBilinear", function () {
+ return resizeBilinear;
+ });
+ __webpack_require__.d(image_ops_namespaceObject, "resizeNearestNeighbor", function () {
+ return resizeNearestNeighbor;
+ });
+ __webpack_require__.d(image_ops_namespaceObject, "nonMaxSuppressionAsync", function () {
+ return nonMaxSuppressionAsync;
+ });
+ __webpack_require__.d(image_ops_namespaceObject, "nonMaxSuppressionWithScore", function () {
+ return nonMaxSuppressionWithScore;
+ });
+ __webpack_require__.d(
+ image_ops_namespaceObject,
+ "nonMaxSuppressionWithScoreAsync",
+ function () {
+ return nonMaxSuppressionWithScoreAsync;
+ }
+ );
+ __webpack_require__.d(image_ops_namespaceObject, "cropAndResize", function () {
+ return cropAndResize;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js
+ var fused_ops_namespaceObject = {};
+ __webpack_require__.r(fused_ops_namespaceObject);
+ __webpack_require__.d(fused_ops_namespaceObject, "matMul", function () {
+ return fused_ops_matMul;
+ });
+ __webpack_require__.d(fused_ops_namespaceObject, "conv2d", function () {
+ return fused_ops_conv2d;
+ });
+ __webpack_require__.d(fused_ops_namespaceObject, "depthwiseConv2d", function () {
+ return fused_ops_depthwiseConv2d;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/ops.js
+ var ops_namespaceObject = {};
+ __webpack_require__.r(ops_namespaceObject);
+ __webpack_require__.d(ops_namespaceObject, "add", function () {
+ return add;
+ });
+ __webpack_require__.d(ops_namespaceObject, "addN", function () {
+ return addN;
+ });
+ __webpack_require__.d(ops_namespaceObject, "atan2", function () {
+ return atan2;
+ });
+ __webpack_require__.d(ops_namespaceObject, "avgPool", function () {
+ return avgPool;
+ });
+ __webpack_require__.d(ops_namespaceObject, "avgPool3d", function () {
+ return avgPool3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "batchToSpaceND", function () {
+ return batchToSpaceND;
+ });
+ __webpack_require__.d(ops_namespaceObject, "batchNorm", function () {
+ return batchNorm;
+ });
+ __webpack_require__.d(ops_namespaceObject, "batchNorm2d", function () {
+ return batchNorm2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "batchNorm3d", function () {
+ return batchNorm3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "batchNorm4d", function () {
+ return batchNorm4d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "broadcastTo", function () {
+ return broadcastTo;
+ });
+ __webpack_require__.d(ops_namespaceObject, "clone", function () {
+ return clone;
+ });
+ __webpack_require__.d(ops_namespaceObject, "complex", function () {
+ return complex["a" /* complex */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "concat", function () {
+ return concat;
+ });
+ __webpack_require__.d(ops_namespaceObject, "concat1d", function () {
+ return concat1d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "concat2d", function () {
+ return concat2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "concat3d", function () {
+ return concat3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "concat4d", function () {
+ return concat4d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "conv1d", function () {
+ return conv1d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "conv2d", function () {
+ return conv2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "conv2dTranspose", function () {
+ return conv2dTranspose;
+ });
+ __webpack_require__.d(ops_namespaceObject, "conv3d", function () {
+ return conv3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "conv3dTranspose", function () {
+ return conv3dTranspose;
+ });
+ __webpack_require__.d(ops_namespaceObject, "cumsum", function () {
+ return cumsum;
+ });
+ __webpack_require__.d(ops_namespaceObject, "depthToSpace", function () {
+ return depthToSpace;
+ });
+ __webpack_require__.d(ops_namespaceObject, "depthwiseConv2d", function () {
+ return depthwiseConv2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "diag", function () {
+ return diag;
+ });
+ __webpack_require__.d(ops_namespaceObject, "div", function () {
+ return div;
+ });
+ __webpack_require__.d(ops_namespaceObject, "divNoNan", function () {
+ return divNoNan;
+ });
+ __webpack_require__.d(ops_namespaceObject, "dot", function () {
+ return dot;
+ });
+ __webpack_require__.d(ops_namespaceObject, "elu", function () {
+ return elu;
+ });
+ __webpack_require__.d(ops_namespaceObject, "equal", function () {
+ return equal;
+ });
+ __webpack_require__.d(ops_namespaceObject, "eye", function () {
+ return eye;
+ });
+ __webpack_require__.d(ops_namespaceObject, "fill", function () {
+ return fill;
+ });
+ __webpack_require__.d(ops_namespaceObject, "floorDiv", function () {
+ return floorDiv;
+ });
+ __webpack_require__.d(ops_namespaceObject, "greater", function () {
+ return greater;
+ });
+ __webpack_require__.d(ops_namespaceObject, "greaterEqual", function () {
+ return greaterEqual;
+ });
+ __webpack_require__.d(ops_namespaceObject, "imag", function () {
+ return imag["a" /* imag */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "leakyRelu", function () {
+ return leakyRelu;
+ });
+ __webpack_require__.d(ops_namespaceObject, "less", function () {
+ return less;
+ });
+ __webpack_require__.d(ops_namespaceObject, "lessEqual", function () {
+ return lessEqual;
+ });
+ __webpack_require__.d(ops_namespaceObject, "localResponseNormalization", function () {
+ return localResponseNormalization;
+ });
+ __webpack_require__.d(ops_namespaceObject, "matMul", function () {
+ return matMul;
+ });
+ __webpack_require__.d(ops_namespaceObject, "max", function () {
+ return max_max;
+ });
+ __webpack_require__.d(ops_namespaceObject, "maxPool", function () {
+ return maxPool;
+ });
+ __webpack_require__.d(ops_namespaceObject, "maxPool3d", function () {
+ return maxPool3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "maxPoolWithArgmax", function () {
+ return maxPoolWithArgmax;
+ });
+ __webpack_require__.d(ops_namespaceObject, "maximum", function () {
+ return maximum;
+ });
+ __webpack_require__.d(ops_namespaceObject, "minimum", function () {
+ return minimum;
+ });
+ __webpack_require__.d(ops_namespaceObject, "mod", function () {
+ return mod;
+ });
+ __webpack_require__.d(ops_namespaceObject, "mul", function () {
+ return mul;
+ });
+ __webpack_require__.d(ops_namespaceObject, "multinomial", function () {
+ return multinomial;
+ });
+ __webpack_require__.d(ops_namespaceObject, "notEqual", function () {
+ return notEqual;
+ });
+ __webpack_require__.d(ops_namespaceObject, "oneHot", function () {
+ return oneHot;
+ });
+ __webpack_require__.d(ops_namespaceObject, "outerProduct", function () {
+ return outerProduct;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pad", function () {
+ return pad_pad;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pad1d", function () {
+ return pad1d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pad2d", function () {
+ return pad2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pad3d", function () {
+ return pad3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pad4d", function () {
+ return pad4d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pool", function () {
+ return pool;
+ });
+ __webpack_require__.d(ops_namespaceObject, "pow", function () {
+ return pow;
+ });
+ __webpack_require__.d(ops_namespaceObject, "prelu", function () {
+ return prelu;
+ });
+ __webpack_require__.d(ops_namespaceObject, "rand", function () {
+ return rand;
+ });
+ __webpack_require__.d(ops_namespaceObject, "randomGamma", function () {
+ return randomGamma;
+ });
+ __webpack_require__.d(ops_namespaceObject, "randomNormal", function () {
+ return randomNormal;
+ });
+ __webpack_require__.d(ops_namespaceObject, "randomUniform", function () {
+ return randomUniform;
+ });
+ __webpack_require__.d(ops_namespaceObject, "real", function () {
+ return real["a" /* real */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "relu", function () {
+ return relu;
+ });
+ __webpack_require__.d(ops_namespaceObject, "relu6", function () {
+ return relu6;
+ });
+ __webpack_require__.d(ops_namespaceObject, "selu", function () {
+ return selu;
+ });
+ __webpack_require__.d(ops_namespaceObject, "separableConv2d", function () {
+ return separableConv2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "spaceToBatchND", function () {
+ return spaceToBatchND;
+ });
+ __webpack_require__.d(ops_namespaceObject, "split", function () {
+ return split;
+ });
+ __webpack_require__.d(ops_namespaceObject, "square", function () {
+ return square;
+ });
+ __webpack_require__.d(ops_namespaceObject, "squaredDifference", function () {
+ return squaredDifference;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sub", function () {
+ return sub;
+ });
+ __webpack_require__.d(ops_namespaceObject, "tile", function () {
+ return tile;
+ });
+ __webpack_require__.d(ops_namespaceObject, "truncatedNormal", function () {
+ return truncatedNormal;
+ });
+ __webpack_require__.d(ops_namespaceObject, "booleanMaskAsync", function () {
+ return booleanMaskAsync;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reverse", function () {
+ return reverse_reverse;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reverse1d", function () {
+ return reverse1d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reverse2d", function () {
+ return reverse2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reverse3d", function () {
+ return reverse3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reverse4d", function () {
+ return reverse4d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "slice", function () {
+ return slice;
+ });
+ __webpack_require__.d(ops_namespaceObject, "slice1d", function () {
+ return slice1d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "slice2d", function () {
+ return slice2d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "slice3d", function () {
+ return slice3d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "slice4d", function () {
+ return slice4d;
+ });
+ __webpack_require__.d(ops_namespaceObject, "abs", function () {
+ return abs;
+ });
+ __webpack_require__.d(ops_namespaceObject, "acos", function () {
+ return acos;
+ });
+ __webpack_require__.d(ops_namespaceObject, "acosh", function () {
+ return acosh;
+ });
+ __webpack_require__.d(ops_namespaceObject, "asin", function () {
+ return asin;
+ });
+ __webpack_require__.d(ops_namespaceObject, "asinh", function () {
+ return asinh;
+ });
+ __webpack_require__.d(ops_namespaceObject, "atan", function () {
+ return atan;
+ });
+ __webpack_require__.d(ops_namespaceObject, "atanh", function () {
+ return atanh;
+ });
+ __webpack_require__.d(ops_namespaceObject, "ceil", function () {
+ return ceil;
+ });
+ __webpack_require__.d(ops_namespaceObject, "clipByValue", function () {
+ return clipByValue;
+ });
+ __webpack_require__.d(ops_namespaceObject, "cos", function () {
+ return cos;
+ });
+ __webpack_require__.d(ops_namespaceObject, "cosh", function () {
+ return cosh;
+ });
+ __webpack_require__.d(ops_namespaceObject, "erf", function () {
+ return erf;
+ });
+ __webpack_require__.d(ops_namespaceObject, "exp", function () {
+ return unary_ops_exp;
+ });
+ __webpack_require__.d(ops_namespaceObject, "expm1", function () {
+ return expm1;
+ });
+ __webpack_require__.d(ops_namespaceObject, "floor", function () {
+ return floor;
+ });
+ __webpack_require__.d(ops_namespaceObject, "log", function () {
+ return log;
+ });
+ __webpack_require__.d(ops_namespaceObject, "log1p", function () {
+ return log1p;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logSigmoid", function () {
+ return logSigmoid;
+ });
+ __webpack_require__.d(ops_namespaceObject, "neg", function () {
+ return neg;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reciprocal", function () {
+ return reciprocal;
+ });
+ __webpack_require__.d(ops_namespaceObject, "round", function () {
+ return round;
+ });
+ __webpack_require__.d(ops_namespaceObject, "rsqrt", function () {
+ return rsqrt;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sigmoid", function () {
+ return sigmoid;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sign", function () {
+ return sign;
+ });
+ __webpack_require__.d(ops_namespaceObject, "isNaN", function () {
+ return unary_ops_isNaN;
+ });
+ __webpack_require__.d(ops_namespaceObject, "isInf", function () {
+ return isInf;
+ });
+ __webpack_require__.d(ops_namespaceObject, "isFinite", function () {
+ return unary_ops_isFinite;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sin", function () {
+ return sin;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sinh", function () {
+ return sinh;
+ });
+ __webpack_require__.d(ops_namespaceObject, "softplus", function () {
+ return softplus;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sqrt", function () {
+ return sqrt;
+ });
+ __webpack_require__.d(ops_namespaceObject, "step", function () {
+ return unary_ops_step;
+ });
+ __webpack_require__.d(ops_namespaceObject, "tan", function () {
+ return tan;
+ });
+ __webpack_require__.d(ops_namespaceObject, "tanh", function () {
+ return tanh;
+ });
+ __webpack_require__.d(ops_namespaceObject, "all", function () {
+ return reduction_ops_all;
+ });
+ __webpack_require__.d(ops_namespaceObject, "any", function () {
+ return any;
+ });
+ __webpack_require__.d(ops_namespaceObject, "argMax", function () {
+ return argMax;
+ });
+ __webpack_require__.d(ops_namespaceObject, "argMin", function () {
+ return argMin;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logSumExp", function () {
+ return logSumExp;
+ });
+ __webpack_require__.d(ops_namespaceObject, "mean", function () {
+ return reduction_ops_mean;
+ });
+ __webpack_require__.d(ops_namespaceObject, "min", function () {
+ return reduction_ops_min;
+ });
+ __webpack_require__.d(ops_namespaceObject, "moments", function () {
+ return moments;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sum", function () {
+ return sum;
+ });
+ __webpack_require__.d(ops_namespaceObject, "prod", function () {
+ return reduction_ops_prod;
+ });
+ __webpack_require__.d(ops_namespaceObject, "equalStrict", function () {
+ return equalStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "greaterEqualStrict", function () {
+ return greaterEqualStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "greaterStrict", function () {
+ return greaterStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "lessEqualStrict", function () {
+ return lessEqualStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "lessStrict", function () {
+ return lessStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "notEqualStrict", function () {
+ return notEqualStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "addStrict", function () {
+ return addStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "divStrict", function () {
+ return divStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "maximumStrict", function () {
+ return maximumStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "minimumStrict", function () {
+ return minimumStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "modStrict", function () {
+ return modStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "mulStrict", function () {
+ return mulStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "powStrict", function () {
+ return powStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "squaredDifferenceStrict", function () {
+ return squaredDifferenceStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "subStrict", function () {
+ return subStrict;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logicalAnd", function () {
+ return logicalAnd;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logicalNot", function () {
+ return logicalNot;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logicalOr", function () {
+ return logicalOr;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logicalXor", function () {
+ return logicalXor;
+ });
+ __webpack_require__.d(ops_namespaceObject, "where", function () {
+ return where;
+ });
+ __webpack_require__.d(ops_namespaceObject, "whereAsync", function () {
+ return whereAsync;
+ });
+ __webpack_require__.d(ops_namespaceObject, "buffer", function () {
+ return array_ops_buffer;
+ });
+ __webpack_require__.d(ops_namespaceObject, "print", function () {
+ return print;
+ });
+ __webpack_require__.d(ops_namespaceObject, "cast", function () {
+ return cast;
+ });
+ __webpack_require__.d(ops_namespaceObject, "expandDims", function () {
+ return expandDims;
+ });
+ __webpack_require__.d(ops_namespaceObject, "reshape", function () {
+ return reshape;
+ });
+ __webpack_require__.d(ops_namespaceObject, "squeeze", function () {
+ return squeeze;
+ });
+ __webpack_require__.d(ops_namespaceObject, "stack", function () {
+ return stack;
+ });
+ __webpack_require__.d(ops_namespaceObject, "unstack", function () {
+ return unstack;
+ });
+ __webpack_require__.d(ops_namespaceObject, "setdiff1dAsync", function () {
+ return setdiff1dAsync;
+ });
+ __webpack_require__.d(ops_namespaceObject, "linspace", function () {
+ return tensor_ops["a" /* linspace */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "ones", function () {
+ return tensor_ops["b" /* ones */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "range", function () {
+ return tensor_ops["d" /* range */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "scalar", function () {
+ return tensor_ops["e" /* scalar */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor", function () {
+ return tensor_ops["f" /* tensor */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor1d", function () {
+ return tensor_ops["g" /* tensor1d */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor2d", function () {
+ return tensor_ops["h" /* tensor2d */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor3d", function () {
+ return tensor_ops["i" /* tensor3d */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor4d", function () {
+ return tensor_ops["j" /* tensor4d */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor5d", function () {
+ return tensor_ops["k" /* tensor5d */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "tensor6d", function () {
+ return tensor_ops["l" /* tensor6d */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "variable", function () {
+ return tensor_ops["m" /* variable */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "zeros", function () {
+ return tensor_ops["n" /* zeros */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "onesLike", function () {
+ return tensor_ops["c" /* onesLike */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "zerosLike", function () {
+ return tensor_ops["o" /* zerosLike */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "transpose", function () {
+ return transpose;
+ });
+ __webpack_require__.d(ops_namespaceObject, "softmax", function () {
+ return softmax;
+ });
+ __webpack_require__.d(ops_namespaceObject, "logSoftmax", function () {
+ return logSoftmax;
+ });
+ __webpack_require__.d(ops_namespaceObject, "norm", function () {
+ return norm_norm;
+ });
+ __webpack_require__.d(ops_namespaceObject, "gather", function () {
+ return gather;
+ });
+ __webpack_require__.d(ops_namespaceObject, "unsortedSegmentSum", function () {
+ return unsortedSegmentSum;
+ });
+ __webpack_require__.d(ops_namespaceObject, "basicLSTMCell", function () {
+ return basicLSTMCell;
+ });
+ __webpack_require__.d(ops_namespaceObject, "multiRNNCell", function () {
+ return multiRNNCell;
+ });
+ __webpack_require__.d(ops_namespaceObject, "movingAverage", function () {
+ return movingAverage;
+ });
+ __webpack_require__.d(ops_namespaceObject, "stridedSlice", function () {
+ return stridedSlice;
+ });
+ __webpack_require__.d(ops_namespaceObject, "topk", function () {
+ return topk;
+ });
+ __webpack_require__.d(ops_namespaceObject, "scatterND", function () {
+ return scatterND;
+ });
+ __webpack_require__.d(ops_namespaceObject, "fft", function () {
+ return fft;
+ });
+ __webpack_require__.d(ops_namespaceObject, "ifft", function () {
+ return ifft;
+ });
+ __webpack_require__.d(ops_namespaceObject, "rfft", function () {
+ return rfft;
+ });
+ __webpack_require__.d(ops_namespaceObject, "irfft", function () {
+ return irfft;
+ });
+ __webpack_require__.d(ops_namespaceObject, "sparseToDense", function () {
+ return sparseToDense;
+ });
+ __webpack_require__.d(ops_namespaceObject, "gatherND", function () {
+ return gatherND;
+ });
+ __webpack_require__.d(ops_namespaceObject, "dropout", function () {
+ return dropout;
+ });
+ __webpack_require__.d(ops_namespaceObject, "hannWindow", function () {
+ return hannWindow;
+ });
+ __webpack_require__.d(ops_namespaceObject, "hammingWindow", function () {
+ return hammingWindow;
+ });
+ __webpack_require__.d(ops_namespaceObject, "frame", function () {
+ return signal_ops_frame;
+ });
+ __webpack_require__.d(ops_namespaceObject, "stft", function () {
+ return stft;
+ });
+ __webpack_require__.d(ops_namespaceObject, "inTopKAsync", function () {
+ return inTopKAsync;
+ });
+ __webpack_require__.d(ops_namespaceObject, "op", function () {
+ return operation["a" /* op */];
+ });
+ __webpack_require__.d(ops_namespaceObject, "image", function () {
+ return image_ops_namespaceObject;
+ });
+ __webpack_require__.d(ops_namespaceObject, "linalg", function () {
+ return linalg_ops_namespaceObject;
+ });
+ __webpack_require__.d(ops_namespaceObject, "losses", function () {
+ return loss_ops_namespaceObject;
+ });
+ __webpack_require__.d(ops_namespaceObject, "spectral", function () {
+ return spectral_ops_namespaceObject;
+ });
+ __webpack_require__.d(ops_namespaceObject, "fused", function () {
+ return fused_ops_namespaceObject;
+ });
+ __webpack_require__.d(ops_namespaceObject, "signal", function () {
+ return signal_ops_namespaceObject;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js
+ var backend_util_namespaceObject = {};
+ __webpack_require__.r(backend_util_namespaceObject);
+ __webpack_require__.d(backend_util_namespaceObject, "axesAreInnerMostDims", function () {
+ return axesAreInnerMostDims;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "combineLocations", function () {
+ return combineLocations;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computeOutAndReduceShapes", function () {
+ return computeOutAndReduceShapes;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "expandShapeToKeepDim", function () {
+ return expandShapeToKeepDim;
+ });
+ __webpack_require__.d(
+ backend_util_namespaceObject,
+ "assertAxesAreInnerMostDims",
+ function () {
+ return assertAxesAreInnerMostDims;
+ }
+ );
+ __webpack_require__.d(backend_util_namespaceObject, "getAxesPermutation", function () {
+ return getAxesPermutation;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getUndoAxesPermutation", function () {
+ return getUndoAxesPermutation;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getInnerMostAxes", function () {
+ return getInnerMostAxes;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getBroadcastDims", function () {
+ return getBroadcastDims;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getReductionAxes", function () {
+ return getReductionAxes;
+ });
+ __webpack_require__.d(
+ backend_util_namespaceObject,
+ "assertAndGetBroadcastShape",
+ function () {
+ return assertAndGetBroadcastShape;
+ }
+ );
+ __webpack_require__.d(backend_util_namespaceObject, "assertParamsConsistent", function () {
+ return assertParamsConsistent;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computeOutShape", function () {
+ return computeOutShape;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computePool2DInfo", function () {
+ return computePool2DInfo;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computePool3DInfo", function () {
+ return computePool3DInfo;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computeConv2DInfo", function () {
+ return computeConv2DInfo;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computeConv3DInfo", function () {
+ return computeConv3DInfo;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computeDefaultPad", function () {
+ return computeDefaultPad;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "tupleValuesAreOne", function () {
+ return tupleValuesAreOne;
+ });
+ __webpack_require__.d(
+ backend_util_namespaceObject,
+ "eitherStridesOrDilationsAreOne",
+ function () {
+ return eitherStridesOrDilationsAreOne;
+ }
+ );
+ __webpack_require__.d(backend_util_namespaceObject, "convertConv2DDataFormat", function () {
+ return convertConv2DDataFormat;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "PARALLELIZE_THRESHOLD", function () {
+ return PARALLELIZE_THRESHOLD;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "computeOptimalWindowSize", function () {
+ return computeOptimalWindowSize;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "nonMaxSuppressionV3", function () {
+ return nonMaxSuppressionV3;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "nonMaxSuppressionV5", function () {
+ return nonMaxSuppressionV5;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "upcastType", function () {
+ return dist_types["c" /* upcastType */];
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getReshaped", function () {
+ return getReshaped;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getPermuted", function () {
+ return getPermuted;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getReshapedPermuted", function () {
+ return getReshapedPermuted;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getSliceBeginCoords", function () {
+ return getSliceBeginCoords;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getSliceSize", function () {
+ return getSliceSize;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "prepareAndValidate", function () {
+ return prepareAndValidate;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "validateUpdateShape", function () {
+ return validateUpdateShape;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "validateInput", function () {
+ return validateInput;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "calculateShapes", function () {
+ return calculateShapes;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "SELU_SCALEALPHA", function () {
+ return SELU_SCALEALPHA;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "SELU_SCALE", function () {
+ return SELU_SCALE;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "shouldFuse", function () {
+ return shouldFuse;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "ERF_P", function () {
+ return ERF_P;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "ERF_A1", function () {
+ return ERF_A1;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "ERF_A2", function () {
+ return ERF_A2;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "ERF_A3", function () {
+ return ERF_A3;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "ERF_A4", function () {
+ return ERF_A4;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "ERF_A5", function () {
+ return ERF_A5;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "warn", function () {
+ return warn;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "log", function () {
+ return log_log;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "mergeRealAndImagArrays", function () {
+ return mergeRealAndImagArrays;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "splitRealAndImagArrays", function () {
+ return splitRealAndImagArrays;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "complexWithEvenIndex", function () {
+ return complexWithEvenIndex;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "complexWithOddIndex", function () {
+ return complexWithOddIndex;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "getComplexWithIndex", function () {
+ return getComplexWithIndex;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "assignToTypedArray", function () {
+ return assignToTypedArray;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "exponents", function () {
+ return exponents;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "exponent", function () {
+ return exponent;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "segment_util", function () {
+ return segment_util_namespaceObject;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "castTensor", function () {
+ return castTensor;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "reshapeTensor", function () {
+ return reshapeTensor;
+ });
+ __webpack_require__.d(backend_util_namespaceObject, "linspaceImpl", function () {
+ return linspaceImpl;
+ });
+
+ // NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js
+ var kernel_impls_namespaceObject = {};
+ __webpack_require__.r(kernel_impls_namespaceObject);
+ __webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV3", function () {
+ return nonMaxSuppressionV3;
+ });
+ __webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV5", function () {
+ return nonMaxSuppressionV5;
+ });
+ __webpack_require__.d(kernel_impls_namespaceObject, "split", function () {
+ return split_shared_split;
+ });
+ __webpack_require__.d(kernel_impls_namespaceObject, "tile", function () {
+ return tile_impl_tile;
+ });
+ __webpack_require__.d(kernel_impls_namespaceObject, "topkImpl", function () {
+ return topkImpl;
+ });
+ __webpack_require__.d(kernel_impls_namespaceObject, "whereImpl", function () {
+ return whereImpl;
+ });
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/engine.js + 2 modules
+ var engine = __webpack_require__(5);
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/flags.js
+ var flags = __webpack_require__(61);
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_names.js
+ var kernel_names = __webpack_require__(6);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js
+ /**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the dimensions in the input shape that are broadcasted to
+ * produce the provided output shape.
+ *
+ * The returned dimensions are 0-indexed and sorted. An example:
+ * inShape = [4, 1, 3]
+ * outShape = [5, 4, 3, 3]
+ * result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.
+ */
+ function getBroadcastDims(inShape, outShape) {
+ const inRank = inShape.length;
+ const dims = [];
+ for (let i = 0; i < inRank; i++) {
+ const dim = inRank - 1 - i;
+ const a = inShape[dim] || 1;
+ const b = outShape[outShape.length - 1 - i] || 1;
+ if (b > 1 && a === 1) {
+ dims.unshift(dim);
+ }
+ }
+ return dims;
+ }
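+ // Illustrative note (kept as a comment so nothing here executes; the call below
+ // is a usage sketch, not part of the original tfjs-core source):
+ //
+ //   getBroadcastDims([4, 1, 3], [5, 4, 3, 3]); // -> [1]
+ //
+ // Only input dimension 1 (size 1) is stretched to size 3 in the output; the
+ // leading output dimension of size 5 has no input counterpart and is not
+ // reported.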
+ /**
+ * Returns the axes in the output space that should be reduced to produce
+ * the input space.
+ */
+ function getReductionAxes(inShape, outShape) {
+ const result = [];
+ for (let i = 0; i < outShape.length; i++) {
+ const inDim = inShape[inShape.length - i - 1];
+ const outAxis = outShape.length - i - 1;
+ const outDim = outShape[outAxis];
+ if (inDim == null || (inDim === 1 && outDim > 1)) {
+ result.unshift(outAxis);
+ }
+ }
+ return result;
+ }
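+ // Illustrative note (comment only, not executed; the call below is a sketch):
+ //
+ //   getReductionAxes([4, 1, 3], [5, 4, 3, 3]); // -> [0, 2]
+ //
+ // Output axis 0 has no input counterpart and output axis 2 maps to an input
+ // dimension of size 1, so a gradient flowing back through this broadcast must
+ // be summed over those output axes before being reshaped to the input shape.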
+ function assertAndGetBroadcastShape(shapeA, shapeB) {
+ const result = [];
+ const l = Math.max(shapeA.length, shapeB.length);
+ for (let i = 0; i < l; i++) {
+ let a = shapeA[shapeA.length - i - 1];
+ if (a == null) {
+ a = 1;
+ }
+ let b = shapeB[shapeB.length - i - 1];
+ if (b == null) {
+ b = 1;
+ }
+ if (a === 1) {
+ result.unshift(b);
+ } else if (b === 1) {
+ result.unshift(a);
+ } else if (a !== b) {
+ const errMsg =
+ `Operands could not be broadcast together with shapes ` + `${shapeA} and ${shapeB}.`;
+ throw Error(errMsg);
+ } else {
+ result.unshift(a);
+ }
+ }
+ return result;
+ }
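+ // Illustrative note (comment only, not executed; example calls are sketches):
+ //
+ //   assertAndGetBroadcastShape([2, 1, 3], [1, 4, 3]); // -> [2, 4, 3]
+ //   assertAndGetBroadcastShape([2, 3], [4, 3]);       // throws: not broadcastable
+ //
+ // Shapes are aligned from the trailing dimension; a missing or size-1 dimension
+ // adopts the other operand's size, and any other mismatch is an error.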
+ //# sourceMappingURL=broadcast_util.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Add_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const addGradConfig = {
+ kernelName: kernel_names["a" /* Add */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ let res = dy;
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = res.sum(reduceAxes);
+ }
+ return res.reshape(a.shape);
+ };
+ const derB = () => {
+ let res = dy;
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = res.sum(reduceAxes);
+ }
+ return res.reshape(b.shape);
+ };
+ return { a: derA, b: derB };
+ },
+ };
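+ // Illustrative note (comment only, not from the original source): for a
+ // broadcast add with a.shape = [2, 3] and b a scalar (shape []), outShape is
+ // [2, 3]; derA returns dy unchanged, while derB sums dy over the reduction
+ // axes [0, 1] reported by getReductionAxes and reshapes the result back to
+ // b's scalar shape.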
+ //# sourceMappingURL=Add_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/AddN_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const addNGradConfig = {
+ kernelName: kernel_names["b" /* AddN */],
+ saveAllInputs: true,
+ gradFunc: (dy, saved) => {
+ const ders = {};
+ saved.forEach((_, i) => {
+ ders[i] = () => dy.clone();
+ });
+ return ders;
+ },
+ };
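+ // Note (comment only): addN involves no broadcasting, so the gradient with
+ // respect to every saved input is simply a clone of dy; the config above
+ // builds one such derivative function per input.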
+ //# sourceMappingURL=AddN_grad.js.map
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util.js
+ var tensor_util = __webpack_require__(11);
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js
+ var tensor_util_env = __webpack_require__(3);
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/operation.js
+ var operation = __webpack_require__(4);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/add.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
+ *
+ * a.add(b).print(); // or tf.add(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast add a with b.
+ * const a = tf.scalar(5);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
+ *
+ * a.add(b).print(); // or tf.add(a, b)
+ * ```
+ * @param a The first `tf.Tensor` to add.
+ * @param b The second `tf.Tensor` to add. Must have the same type as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
+ function add_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "add");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "add");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ const forward = (backend, save) => {
+ const res = backend.add($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["a" /* Add */]
+ );
+ }
+ const add = Object(operation["a" /* op */])({ add_ });
+ //# sourceMappingURL=add.js.map
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor.js + 1 modules
+ var dist_tensor = __webpack_require__(7);
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util.js
+ var util = __webpack_require__(1);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js
+ /**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ function assertParamsConsistent(shapes, axis) {
+ const rank = shapes[0].length;
+ shapes.forEach((shape, i) => {
+ util["assert"](
+ shape.length === rank,
+ () =>
+ `Error in concat${rank}D: rank of tensors[${i}] must be the same ` +
+ `as the rank of the rest (${rank})`
+ );
+ });
+ util["assert"](
+ axis >= 0 && axis < rank,
+ () => `Error in concat${rank}D: axis must be between 0 and ${rank - 1}.`
+ );
+ const firstShape = shapes[0];
+ shapes.forEach((shape, i) => {
+ for (let r = 0; r < rank; r++) {
+ util["assert"](
+ r === axis || shape[r] === firstShape[r],
+ () =>
+ `Error in concat${rank}D: Shape of tensors[${i}] (${shape}) ` +
+ `does not match the shape of the rest (${firstShape}) ` +
+ `along the non-concatenated axis ${r}.`
+ );
+ }
+ });
+ }
+ function computeOutShape(shapes, axis) {
+ const outputShape = shapes[0].slice();
+ for (let i = 1; i < shapes.length; i++) {
+ outputShape[axis] += shapes[i][axis];
+ }
+ return outputShape;
+ }
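+ // Illustrative note (comment only, not executed; the call below is a sketch):
+ //
+ //   computeOutShape([[2, 3], [4, 3]], 0); // -> [6, 3]
+ //
+ // Only the concatenation axis is summed; every other dimension is copied from
+ // the first shape, with assertParamsConsistent as the companion check that
+ // those dimensions agree across all inputs.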
+ //# sourceMappingURL=concat_util.js.map
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops.js
+ var tensor_ops = __webpack_require__(8);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Concatenates a list of `tf.Tensor`s along a given axis.
+ *
+ * The tensors' ranks and types must match, and their sizes must match in all
+ * dimensions except `axis`.
+ *
+ * Also available are stricter rank-specific methods that assert that
+ * `tensors` are of the given rank:
+ * - `tf.concat1d`
+ * - `tf.concat2d`
+ * - `tf.concat3d`
+ * - `tf.concat4d`
+ *
+ * Except `tf.concat1d` (which does not have an axis param), all methods have
+ * the same signature as this method.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * a.concat(b).print(); // or tf.concat([a, b])
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.concat([a, b, c]).print();
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 2], [10, 20]]);
+ * const b = tf.tensor2d([[3, 4], [30, 40]]);
+ * const axis = 1;
+ * tf.concat([a, b], axis).print();
+ * ```
+ * @param tensors A list of tensors to concatenate.
+ * @param axis The axis to concatenate along. Defaults to 0 (the first dim).
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
+ function concat_(tensors, axis = 0) {
+ Object(util["assert"])(tensors.length >= 1, () => "Pass at least one tensor to concat");
+ let $tensors = Object(tensor_util_env["b" /* convertToTensorArray */])(
+ tensors,
+ "tensors",
+ "concat"
+ );
+ if ($tensors[0].dtype === "complex64") {
+ $tensors.forEach((tensor) => {
+ if (tensor.dtype !== "complex64") {
+ throw new Error(`Cannot concatenate complex64 tensors with a tensor
+ with dtype ${tensor.dtype}. `);
+ }
+ });
+ }
+ const $axis = Object(util["parseAxisParam"])(axis, $tensors[0].shape)[0];
+ const outShape = computeOutShape(
+ $tensors.map((t) => t.shape),
+ $axis
+ );
+ if (Object(util["sizeFromShape"])(outShape) === 0) {
+ return Object(tensor_ops["f" /* tensor */])([], outShape);
+ }
+ // Keep only non-empty tensors (ignore tensors with 0 in their shape).
+ $tensors = $tensors.filter((t) => t.size > 0);
+ if ($tensors.length === 1) {
+ return $tensors[0];
+ }
+ const shapes = $tensors.map((t) => t.shape);
+ assertParamsConsistent(shapes, $axis);
+ const forward = (backend, save) => {
+ const res = backend.concat($tensors, $axis);
+ save($tensors);
+ return res;
+ };
+ const inputs = $tensors;
+ const attr = { axis };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["l" /* Concat */],
+ attr
+ );
+ }
+ const concat = Object(operation["a" /* op */])({ concat_ });
+ //# sourceMappingURL=concat.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/array_ops.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Reshapes a `tf.Tensor` to a given shape.
+ *
+ * Given an input tensor, returns a new tensor with the same values as the
+ * input tensor with shape `shape`.
+ *
+ * If one component of `shape` is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of `[-1]` flattens into 1-D. At most one component of
+ * `shape` can be -1.
+ *
+ * If `shape` is 1-D or higher, then the operation returns a tensor with shape
+ * `shape` filled with the values of `tensor`. In this case, the number of
+ * elements implied by `shape` must be the same as the number of elements in
+ * `tensor`.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * x.reshape([2, 2]).print();
+ * ```
+ *
+ * @param x The input tensor to be reshaped.
+ * @param shape An array of integers defining the output tensor shape.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function reshape_(x, shape) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "reshape", null);
+ shape = util["inferFromImplicitShape"](shape, $x.size);
+ util["assert"](
+ $x.size === util["sizeFromShape"](shape),
+ () => "new shape and old shape must have the same number of elements."
+ );
+ const grad = (dy) => {
+ return { x: () => dy.reshape($x.shape) };
+ };
+ const attrs = { shape };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.reshape($x, shape),
+ { x: $x },
+ grad,
+ "Reshape",
+ attrs
+ );
+ }
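+ // Illustrative note (comment only, not executed): a -1 entry in `shape` is
+ // resolved by util.inferFromImplicitShape so that the element count is
+ // preserved, e.g. (sketch):
+ //
+ //   tf.tensor1d([1, 2, 3, 4, 5, 6]).reshape([-1, 3]); // shape [2, 3]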
+ /**
+ * Removes dimensions of size 1 from the shape of a `tf.Tensor`.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);
+ * x.squeeze().print();
+ * ```
+ *
+ * @param x The input tensor to be squeezed.
+ * @param axis An optional list of numbers. If specified, only
+ * squeezes the dimensions listed. The dimension index starts at 0. It
+ * is an error to squeeze a dimension that is not 1.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function squeeze_(x, axis) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "squeeze");
+ return reshape($x, util["squeezeShape"]($x.shape, axis).newShape);
+ }
+ /**
+ * Casts a `tf.Tensor` to a new dtype.
+ *
+ * ```js
+ * const x = tf.tensor1d([1.5, 2.5, 3]);
+ * tf.cast(x, 'int32').print();
+ * ```
+ * @param x The input tensor to be cast.
+ * @param dtype The dtype to cast the input tensor to.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function cast_(x, dtype) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cast");
+ // Sanity checks.
+ if (!util["isValidDtype"](dtype)) {
+ throw new Error(`Failed to cast to unknown dtype ${dtype}`);
+ }
+ if (
+ (dtype === "string" && $x.dtype !== "string") ||
+ (dtype !== "string" && $x.dtype === "string")
+ ) {
+ throw new Error("Only strings can be cast to strings");
+ }
+ const grad = (dy) => {
+ return { x: () => dy.clone() };
+ };
+ const attrs = { dtype };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.cast($x, dtype),
+ { x: $x },
+ grad,
+ "Cast",
+ attrs
+ );
+ }
+ /**
+ * Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.stack([a, b, c]).print();
+ * ```
+ *
+ * @param tensors A list of tensor objects with the same shape and dtype.
+ * @param axis The axis to stack along. Defaults to 0 (the first dim).
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
+ function stack_(tensors, axis = 0) {
+ const $tensors = Object(tensor_util_env["b" /* convertToTensorArray */])(
+ tensors,
+ "tensors",
+ "stack"
+ );
+ util["assert"]($tensors.length >= 1, () => "Pass at least one tensor to tf.stack");
+ if ($tensors.length === 1) {
+ return $tensors[0].expandDims(axis);
+ }
+ const rank = $tensors[0].rank;
+ const shape = $tensors[0].shape;
+ const dtype = $tensors[0].dtype;
+ util["assert"](axis <= rank, () => "Axis must be <= rank of the tensor");
+ $tensors.forEach((t) => {
+ util["assertShapesMatch"](
+ shape,
+ t.shape,
+ "All tensors passed to stack must have matching shapes"
+ );
+ });
+ $tensors.forEach((t) => {
+ util["assert"](
+ dtype === t.dtype,
+ () => "All tensors passed to stack must have matching dtypes"
+ );
+ });
+ const expandedTensors = $tensors.map((t) => t.expandDims(axis));
+ return concat(expandedTensors, axis);
+ }
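+ // Note (comment only): stack_ is composed from the ops above rather than a
+ // dedicated kernel; each input gains a unit dimension at `axis` via expandDims
+ // and the results are concatenated, so stacking three shape-[2] tensors along
+ // axis 0 yields shape [3, 2].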
+ /**
+ * Unstacks a `tf.Tensor` of rank-`R` into a list of rank-`(R-1)` `tf.Tensor`s.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * tf.unstack(a).forEach(tensor => tensor.print());
+ * ```
+ *
+ * @param x A tensor object.
+ * @param axis The axis to unstack along. Defaults to 0 (the first dim).
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
+ function unstack_(x, axis = 0) {
+ axis = axis || 0;
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "unstack");
+ util["assert"](
+ axis >= -$x.shape.length && axis < $x.shape.length,
+ () => `Axis = ${axis} is not in [-${$x.shape.length}, ${$x.shape.length})`
+ );
+ if (axis < 0) {
+ axis += $x.shape.length;
+ }
+ const grad = (dy) => {
+ return { x: () => stack(dy, axis) };
+ };
+ const attrs = { axis };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.unstack($x, axis),
+ { x: $x },
+ grad,
+ "Unpack",
+ attrs
+ );
+ }
+ /**
+ * Returns a `tf.Tensor` that has expanded rank, by inserting a dimension
+ * into the tensor's shape.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const axis = 1;
+ * x.expandDims(axis).print();
+ * ```
+ *
+ * @param x The input tensor whose dimensions to be expanded.
+ * @param axis The dimension index at which to insert shape of `1`. Defaults
+ * to 0 (the first dimension).
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function expandDims_(x, axis = 0) {
+ const parseAs = null;
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(
+ x,
+ "x",
+ "expandDims",
+ parseAs
+ );
+ util["assert"](axis <= $x.rank, () => "Axis must be <= rank of the tensor");
+ const newShape = $x.shape.slice();
+ if (axis < 0) {
+ // Negative value is counted from the tail of rank.
+ util["assert"](
+ -($x.rank + 1) <= axis,
+ () => `Axis must be in the interval [${-($x.rank + 1)}, ${$x.rank}]`
+ );
+ axis = $x.rank + axis + 1;
+ }
+ newShape.splice(axis, 0, 1);
+ return reshape($x, newShape);
+ }
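+ // Illustrative note (comment only, not executed): a negative axis counts from
+ // the end of the expanded rank; for a [2, 3] tensor, expandDims(x, -1) maps
+ // -1 to rank + axis + 1 = 2 and returns shape [2, 3, 1].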
+ /**
+ * Computes the difference between two lists of numbers.
+ *
+ * Given a Tensor `x` and a Tensor `y`, this operation returns a Tensor `out`
+ * that represents all values that are in `x` but not in `y`. The returned
+ * Tensor `out` is sorted in the same order that the numbers appear in `x`
+ * (duplicates are preserved). This operation also returns a Tensor `indices` that
+ * represents the position of each `out` element in `x`. In other words:
+ *
+ * `out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]`
+ *
+ * ```js
+ * const x = [1, 2, 3, 4, 5, 6];
+ * const y = [1, 3, 5];
+ *
+ * const [out, indices] = await tf.setdiff1dAsync(x, y);
+ * out.print(); // [2, 4, 6]
+ * indices.print(); // [1, 3, 5]
+ * ```
+ *
+ * @param x 1-D Tensor. Values to keep.
+ * @param y 1-D Tensor. Must have the same type as x. Values to exclude in the
+ * output.
+ * @returns Promise of Tensor tuple [out, indices].
+ * out: Tensor with the same type as x.
+ * indices: A Tensor of type int32.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ async function setdiff1dAsync_(x, y) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "setdiff1d");
+ const $y = Object(tensor_util_env["a" /* convertToTensor */])(y, "y", "setdiff1d");
+ util["assert"](
+ $x.dtype === $y.dtype,
+ () => `x and y should have the same dtype, but got x (${$x.dtype}) and y (${$y.dtype}).`
+ );
+ util["assert"]($x.rank === 1, () => `x should be 1D tensor, but got x (${$x.shape}).`);
+ util["assert"]($y.rank === 1, () => `y should be 1D tensor, but got y (${$y.shape}).`);
+ const xVals = await $x.data();
+ const yVals = await $y.data();
+ const ySet = new Set(yVals);
+ let outputSize = 0;
+ for (let i = 0; i < xVals.length; i++) {
+ if (!ySet.has(xVals[i])) {
+ outputSize++;
+ }
+ }
+ const buffer = new dist_tensor["b" /* TensorBuffer */]([outputSize], $x.dtype);
+ const indices = new dist_tensor["b" /* TensorBuffer */]([outputSize], "int32");
+ for (let i = 0, p = 0; i < xVals.length; i++) {
+ if (!ySet.has(xVals[i])) {
+ buffer.values[p] = xVals[i];
+ indices.values[p] = i;
+ p++;
+ }
+ }
+ return [buffer.toTensor(), indices.toTensor()];
+ }
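+ // Note (comment only): setdiff1dAsync_ makes two passes over the downloaded
+ // x values: the first counts entries absent from the Set built over y so the
+ // output buffers can be sized exactly, and the second fills the value buffer
+ // and the int32 index buffer in order.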
+ /**
+ * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.
+ *
+ * The values are stored on the CPU as a `TypedArray`. Fill the buffer using
+ * `buffer.set()`, or by modifying `buffer.values` directly.
+ *
+ * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with
+ * those values.
+ *
+ * ```js
+ * // Create a buffer and set values at particular indices.
+ * const buffer = tf.buffer([2, 2]);
+ * buffer.set(3, 0, 0);
+ * buffer.set(5, 1, 0);
+ *
+ * // Convert the buffer back to a tensor.
+ * buffer.toTensor().print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param dtype The dtype of the buffer. Defaults to 'float32'.
+ * @param values The values of the buffer as `TypedArray`. Defaults to
+ * zeros.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Creation'} */
+ function array_ops_buffer(shape, dtype = "float32", values) {
+ dtype = dtype || "float32";
+ util["assertNonNegativeIntegerDimensions"](shape);
+ return new dist_tensor["b" /* TensorBuffer */](shape, dtype, values);
+ }
+ /**
+ * Prints information about the `tf.Tensor` including its data.
+ *
+ * ```js
+ * const verbose = true;
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);
+ * ```
+ * @param x The tensor to be printed.
+ * @param verbose Whether to print verbose information about the `tf.Tensor`,
+ * including its dtype and size.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Creation'} */
+ function print(x, verbose = false) {
+ console.log(x.toString(verbose));
+ }
+
+ const cast = Object(operation["a" /* op */])({ cast_ });
+ const expandDims = Object(operation["a" /* op */])({ expandDims_ });
+ const reshape = Object(operation["a" /* op */])({ reshape_ });
+ const squeeze = Object(operation["a" /* op */])({ squeeze_ });
+ const stack = Object(operation["a" /* op */])({ stack_ });
+ const unstack = Object(operation["a" /* op */])({ unstack_ });
+ const setdiff1dAsync = setdiff1dAsync_;
+ //# sourceMappingURL=array_ops.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
+ * The result is rounded down with the floor function.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ *
+ * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
+ function floorDiv_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "floorDiv");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "floorDiv");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ const forward = (backend, save) => {
+ const res = backend.floorDiv($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["D" /* FloorDiv */]
+ );
+ }
+ const floorDiv = Object(operation["a" /* op */])({ floorDiv_ });
+ //# sourceMappingURL=floorDiv.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/div.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.div(b).print(); // or tf.div(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ *
+ * a.div(b).print(); // or tf.div(a, b)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
+ function div_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "div");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "div");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ if ($a.dtype === "int32" && $b.dtype === "int32") {
+ return floorDiv($a, $b);
+ }
+ const forward = (backend, save) => {
+ const res = backend.realDivide($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ const attrs = {};
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["y" /* Div */],
+ attrs
+ );
+ }
+ const div = Object(operation["a" /* op */])({ div_ });
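+ // A minimal illustrative sketch of the int32 branch in div_ above (assumed
+ // usage of the tf.div / tf.tensor1d API exposed by this bundle): when both
+ // operands have dtype 'int32', div delegates to floorDiv, so the quotient is
+ // floored.
+ //
+ //   const a = tf.tensor1d([7, 8, 9], 'int32');
+ //   const b = tf.tensor1d([2, 2, 2], 'int32');
+ //   tf.div(a, b).print(); // [3, 4, 4]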
+ //# sourceMappingURL=div.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mul.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting.
+ *
+ * We also expose `tf.mulStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([2, 3, 4, 5]);
+ *
+ * a.mul(b).print(); // or tf.mul(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast mul a with b.
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.scalar(5);
+ *
+ * a.mul(b).print(); // or tf.mul(a, b)
+ * ```
+ * @param a The first tensor to multiply.
+ * @param b The second tensor to multiply. Must have the same dtype as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
+ function mul_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "mul");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "mul");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ const forward = (backend, save) => {
+ const res = backend.multiply($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["Y" /* Multiply */]
+ );
+ }
+ const mul = Object(operation["a" /* op */])({ mul_ });
+ //# sourceMappingURL=mul.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Provided `f(x)`, returns another function `g(x, dy?)`, which gives the
+ * gradient of `f(x)` with respect to `x`.
+ *
+ * If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to
+ * `x` is computed instead. `f(x)` must take a single tensor `x` and return a
+ * single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead.
+ *
+ * ```js
+ * // f(x) = x ^ 2
+ * const f = x => x.square();
+ * // f'(x) = 2x
+ * const g = tf.grad(f);
+ *
+ * const x = tf.tensor1d([2, 3]);
+ * g(x).print();
+ * ```
+ *
+ * ```js
+ * // f(x) = x ^ 3
+ * const f = x => x.pow(tf.scalar(3, 'int32'));
+ * // f'(x) = 3x ^ 2
+ * const g = tf.grad(f);
+ * // f''(x) = 6x
+ * const gg = tf.grad(g);
+ *
+ * const x = tf.tensor1d([2, 3]);
+ * gg(x).print();
+ * ```
+ *
+ * @param f The function f(x), to compute gradient for.
+ */
+ /** @doc {heading: 'Training', subheading: 'Gradients'} */
+ function gradients_grad(f) {
+ util["assert"](util["isFunction"](f), () => "The f passed in grad(f) must be a function");
+ return (x, dy) => {
+ // x can be of any dtype, thus null as the last argument.
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tf.grad", null);
+ const $dy =
+ dy != null
+ ? Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "tf.grad")
+ : null;
+ return engine["a" /* ENGINE */].tidy(() => {
+ const { value, grads } = engine["a" /* ENGINE */].gradients(() => f($x), [$x], $dy);
+ if ($dy != null) {
+ util["assertShapesMatch"](
+ value.shape,
+ $dy.shape,
+ "The shape of dy passed in grad(f)(x, dy) must match the shape " +
+ "returned by f(x)"
+ );
+ }
+ checkGrads(grads);
+ return grads[0];
+ });
+ };
+ }
+ /**
+ * Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`,
+ * which gives an array of gradients of `f()` with respect to each input
+ * [`x1`,`x2`,...].
+ *
+ * If `dy` is passed when calling `g()`, the gradient of
+ * `f(x1,...).mul(dy).sum()` with respect to each input is computed instead.
+ * The provided `f` must take one or more tensors and return a single tensor
+ * `y`. If `f()` takes a single input, we recommend using `tf.grad` instead.
+ *
+ * ```js
+ * // f(a, b) = a * b
+ * const f = (a, b) => a.mul(b);
+ * // df / da = b, df / db = a
+ * const g = tf.grads(f);
+ *
+ * const a = tf.tensor1d([2, 3]);
+ * const b = tf.tensor1d([-2, -3]);
+ * const [da, db] = g([a, b]);
+ * console.log('da');
+ * da.print();
+ * console.log('db');
+ * db.print();
+ * ```
+ *
+ * @param f The function `f(x1, x2,...)` to compute gradients for.
+ */
+ /** @doc {heading: 'Training', subheading: 'Gradients'} */
+ function gradients_grads(f) {
+ util["assert"](util["isFunction"](f), () => "The f passed in grads(f) must be a function");
+ return (args, dy) => {
+ util["assert"](
+ Array.isArray(args),
+ () =>
+ "The args passed in grads(f)(args) must be an array " +
+ "of `Tensor`s or `TensorLike`s"
+ );
+ // args can be of any dtype, thus null as the last argument.
+ const $args = Object(tensor_util_env["b" /* convertToTensorArray */])(
+ args,
+ "args",
+ "tf.grads",
+ null
+ );
+ const $dy =
+ dy != null
+ ? Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "tf.grads")
+ : null;
+ return engine["a" /* ENGINE */].tidy(() => {
+ const { value, grads } = engine["a" /* ENGINE */].gradients(
+ () => f(...$args),
+ $args,
+ $dy
+ );
+ if ($dy != null) {
+ util["assertShapesMatch"](
+ value.shape,
+ $dy.shape,
+ "The shape of dy passed in grads(f)([x1,...], dy) must " +
+ "match the shape returned by f([x1,...])"
+ );
+ }
+ checkGrads(grads);
+ return grads;
+ });
+ };
+ }
+ /**
+ * Like `tf.grad`, but also returns the value of `f()`. Useful when `f()`
+ * returns a metric you want to show.
+ *
+ * The result is a rich object with the following properties:
+ * - grad: The gradient of `f(x)` w.r.t `x` (result of `tf.grad`).
+ * - value: The value returned by `f(x)`.
+ *
+ * ```js
+ * // f(x) = x ^ 2
+ * const f = x => x.square();
+ * // f'(x) = 2x
+ * const g = tf.valueAndGrad(f);
+ *
+ * const x = tf.tensor1d([2, 3]);
+ * const {value, grad} = g(x);
+ *
+ * console.log('value');
+ * value.print();
+ * console.log('grad');
+ * grad.print();
+ * ```
+ */
+ /** @doc {heading: 'Training', subheading: 'Gradients'} */
+ function valueAndGrad(f) {
+ util["assert"](
+ util["isFunction"](f),
+ () => "The f passed in valueAndGrad(f) must be a function"
+ );
+ return (x, dy) => {
+ util["assert"](
+ x instanceof dist_tensor["a" /* Tensor */],
+ () => "The x passed in valueAndGrad(f)(x) must be a tensor"
+ );
+ util["assert"](
+ dy == null || dy instanceof dist_tensor["a" /* Tensor */],
+ () => "The dy passed in valueAndGrad(f)(x, dy) must be a tensor"
+ );
+ const { grads, value } = engine["a" /* ENGINE */].gradients(() => f(x), [x], dy);
+ checkGrads(grads);
+ return { grad: grads[0], value };
+ };
+ }
+ /**
+ * Like `tf.grads`, but also returns the value of `f()`. Useful when `f()`
+ * returns a metric you want to show.
+ *
+ * The result is a rich object with the following properties:
+ * - grads: The gradients of `f()` w.r.t each input (result of `tf.grads`).
+ * - value: The value returned by `f(x)`.
+ *
+ * ```js
+ * // f(a, b) = a * b
+ * const f = (a, b) => a.mul(b);
+ * // df/da = b, df/db = a
+ * const g = tf.valueAndGrads(f);
+ *
+ * const a = tf.tensor1d([2, 3]);
+ * const b = tf.tensor1d([-2, -3]);
+ * const {value, grads} = g([a, b]);
+ *
+ * const [da, db] = grads;
+ *
+ * console.log('value');
+ * value.print();
+ *
+ * console.log('da');
+ * da.print();
+ * console.log('db');
+ * db.print();
+ * ```
+ */
+ /** @doc {heading: 'Training', subheading: 'Gradients'} */
+ function valueAndGrads(f) {
+ util["assert"](
+ util["isFunction"](f),
+ () => "The f passed in valueAndGrads(f) must be a function"
+ );
+ return (args, dy) => {
+ util["assert"](
+ Array.isArray(args) &&
+ args.every((arg) => arg instanceof dist_tensor["a" /* Tensor */]),
+ () => "The args passed in valueAndGrads(f)(args) must be array of " + "tensors"
+ );
+ util["assert"](
+ dy == null || dy instanceof dist_tensor["a" /* Tensor */],
+ () => "The dy passed in valueAndGrads(f)(args, dy) must be a tensor"
+ );
+ const res = engine["a" /* ENGINE */].gradients(() => f(...args), args, dy);
+ if (dy != null) {
+ util["assertShapesMatch"](
+ res.value.shape,
+ dy.shape,
+ "The shape of dy passed in valueAndGrads(f)([x1,...], dy) must " +
+ "match the shape returned by f([x1,...])"
+ );
+ }
+ checkGrads(res.grads);
+ return res;
+ };
+ }
+ /**
+ * Computes and returns the gradient of f(x) with respect to the list of
+ * trainable variables provided by `varList`. If no list is provided, it
+ * defaults to all trainable variables.
+ *
+ * ```js
+ * const a = tf.variable(tf.tensor1d([3, 4]));
+ * const b = tf.variable(tf.tensor1d([5, 6]));
+ * const x = tf.tensor1d([1, 2]);
+ *
+ * // f(a, b) = a * x ^ 2 + b * x
+ * const f = () => a.mul(x.square()).add(b.mul(x)).sum();
+ * // df/da = x ^ 2, df/db = x
+ * const {value, grads} = tf.variableGrads(f);
+ *
+ * Object.keys(grads).forEach(varName => grads[varName].print());
+ * ```
+ *
+ * @param f The function to execute. f() should return a scalar.
+ * @param varList The list of variables to compute the gradients with respect
+ * to. Defaults to all trainable variables.
+ * @returns An object with the following keys and values:
+ * - `value`: The value of the function `f`.
+ * - `grads`: A map from the names of the variables to the gradients.
+ * If the `varList` argument is provided explicitly and contains a subset of
+ * non-trainable variables, this map in the return value will contain keys
+ * that map the names of the non-trainable variables to `null`.
+ */
+ /** @doc {heading: 'Training', subheading: 'Gradients'} */
+ function variableGrads(f, varList) {
+ util["assert"](
+ util["isFunction"](f),
+ () => "The f passed in variableGrads(f) must be a function"
+ );
+ util["assert"](
+ varList == null ||
+ (Array.isArray(varList) &&
+ varList.every((v) => v instanceof dist_tensor["c" /* Variable */])),
+ () => "The varList passed in variableGrads(f, varList) must be an array " + "of variables"
+ );
+ const specifiedVarList = varList != null;
+ if (!specifiedVarList) {
+ // Get all of the trainable variables.
+ varList = [];
+ for (const varName in engine["a" /* ENGINE */].registeredVariables) {
+ varList.push(engine["a" /* ENGINE */].registeredVariables[varName]);
+ }
+ }
+ const specifiedNonTrainable = specifiedVarList
+ ? varList.filter((variable) => !variable.trainable)
+ : null;
+ // Prune non-trainable variables.
+ const originalVarCount = varList.length;
+ varList = varList.filter((variable) => variable.trainable);
+ util["assert"](
+ varList.length > 0,
+ () =>
+ `variableGrads() expects at least one of the input variables to ` +
+ `be trainable, but none of the ${originalVarCount} variables is ` +
+ `trainable.`
+ );
+ const allowNoGradients = true;
+ const { value, grads } = engine["a" /* ENGINE */].gradients(
+ f,
+ varList,
+ null,
+ allowNoGradients
+ );
+ util["assert"](
+ grads.some((g) => g != null),
+ () =>
+ "Cannot find a connection between any variable and the result of " +
+ "the loss function y=f(x). Please make sure the operations that " +
+ "use variables are inside the function f passed to minimize()."
+ );
+ util["assert"](
+ value.rank === 0,
+ () =>
+ `The f passed in variableGrads(f) must return a scalar, but it ` +
+ `returned a rank-${value.rank} tensor`
+ );
+ const namedGrads = {};
+ varList.forEach((v, i) => {
+ if (grads[i] != null) {
+ namedGrads[v.name] = grads[i];
+ }
+ });
+ if (specifiedNonTrainable != null) {
+ // If varList is explicitly provided and contains non-trainable values,
+ // add them to the returned gradients with `null` values.
+ specifiedNonTrainable.forEach((v) => (namedGrads[v.name] = null));
+ }
+ return { value, grads: namedGrads };
+ }
+ /**
+ * Overrides the gradient computation of a function `f`.
+ *
+ * Takes a function
+ * `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}`
+ * and returns another function `g(...inputs)` which takes the same inputs as
+ * `f`. When called, `g` returns `f().value`. In backward mode, custom gradients
+ * with respect to each input of `f` are computed using `f().gradFunc`.
+ *
+ * The `save` function passed to `f` should be used for saving tensors needed
+ * in the gradient. The `saved` passed to the `gradFunc` is a
+ * `NamedTensorMap`, which contains those saved tensors.
+ *
+ * ```js
+ * const customOp = tf.customGrad((x, save) => {
+ * // Save x to make sure it's available later for the gradient.
+ * save([x]);
+ * // Override gradient of our custom x ^ 2 op to be dy * abs(x);
+ * return {
+ * value: x.square(),
+ * // Note `saved.x` which points to the `x` we saved earlier.
+ * gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]
+ * };
+ * });
+ *
+ * const x = tf.tensor1d([-1, -2, 3]);
+ * const dx = tf.grad(x => customOp(x));
+ *
+ * console.log(`f(x):`);
+ * customOp(x).print();
+ * console.log(`f'(x):`);
+ * dx(x).print();
+ * ```
+ *
+ * @param f The function to evaluate in forward mode, which should return
+ * `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc`
+ * returns the custom gradients of `f` with respect to its inputs.
+ */
+ /** @doc {heading: 'Training', subheading: 'Gradients'} */
+ function customGrad(f) {
+ return engine["a" /* ENGINE */].customGrad(f);
+ }
+ function checkGrads(grads) {
+ const numNullGradients = grads.filter((g) => g == null).length;
+ if (numNullGradients > 0) {
+ throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that
+ the f you passed encloses all operations that lead from x to y.`);
+ }
+ }
+
+ //# sourceMappingURL=gradients.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js
+ /**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Returns true if the given axes are the innermost dimensions of the
+ * array.
+ */
+ function axesAreInnerMostDims(axes, rank) {
+ for (let i = 0; i < axes.length; ++i) {
+ if (axes[axes.length - i - 1] !== rank - 1 - i) {
+ return false;
+ }
+ }
+ return true;
+ }
+ function combineLocations(outputLoc, reduceLoc, axes) {
+ const rank = outputLoc.length + reduceLoc.length;
+ const loc = [];
+ let outIdx = 0;
+ let reduceIdx = 0;
+ for (let dim = 0; dim < rank; dim++) {
+ if (axes.indexOf(dim) === -1) {
+ loc.push(outputLoc[outIdx++]);
+ } else {
+ loc.push(reduceLoc[reduceIdx++]);
+ }
+ }
+ return loc;
+ }
+ function computeOutAndReduceShapes(aShape, axes) {
+ const outShape = [];
+ const rank = aShape.length;
+ for (let dim = 0; dim < rank; dim++) {
+ if (axes.indexOf(dim) === -1) {
+ outShape.push(aShape[dim]);
+ }
+ }
+ const reduceShape = axes.map((dim) => aShape[dim]);
+ return [outShape, reduceShape];
+ }
+ function expandShapeToKeepDim(shape, axes) {
+ const reduceSubShape = axes.map((x) => 1);
+ return combineLocations(shape, reduceSubShape, axes);
+ }
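+ // An illustrative sketch of the two shape helpers above: for an input of
+ // shape [2, 3, 4] reduced over axes = [1],
+ //
+ //   computeOutAndReduceShapes([2, 3, 4], [1]); // [[2, 4], [3]]
+ //   expandShapeToKeepDim([2, 4], [1]);         // [2, 1, 4]
+ //
+ // i.e. the reduced dimension is re-inserted with length 1 so a reduced
+ // result can be broadcast back against the original shape.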
+ function assertAxesAreInnerMostDims(msg, axes, rank) {
+ util["assert"](
+ axesAreInnerMostDims(axes, rank),
+ () =>
+ `${msg} supports only inner-most axes for now. ` +
+ `Got axes ${axes} and rank-${rank} input.`
+ );
+ }
+ /**
+ * Returns the axes permutation to be used with `tf.transpose`, if such
+ * permutation is necessary. Otherwise it returns null. This method is used by
+ * operations that operate only on inner-most axes.
+ */
+ function getAxesPermutation(axes, rank) {
+ if (axesAreInnerMostDims(axes, rank)) {
+ return null;
+ }
+ const result = [];
+ for (let i = 0; i < rank; ++i) {
+ if (axes.indexOf(i) === -1) {
+ result.push(i);
+ }
+ }
+ axes.forEach((axis) => result.push(axis));
+ return result;
+ }
+ /** Returns the axes permutation that undoes the original permutation. */
+ function getUndoAxesPermutation(axes) {
+ return axes
+ .map((axis, i) => [i, axis])
+ .sort((a, b) => a[1] - b[1])
+ .map((x) => x[0]);
+ }
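+ // An illustrative sketch of the permutation helpers above: reducing axis 0
+ // of a rank-3 tensor is not an inner-most reduction, so a transpose is
+ // needed first.
+ //
+ //   getAxesPermutation([0], 3);        // [1, 2, 0]  (move axis 0 last)
+ //   getUndoAxesPermutation([1, 2, 0]); // [2, 0, 1]  (restores the order)
+ //   getAxesPermutation([2], 3);        // null (axis 2 is already inner-most)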
+ function getInnerMostAxes(numAxes, rank) {
+ const res = [];
+ for (let i = rank - numAxes; i < rank; ++i) {
+ res.push(i);
+ }
+ return res;
+ }
+ //# sourceMappingURL=axis_util.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reduction_ops_util.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Gradient helper function for the min and max operations.
+ */
+ function gradForMinAndMax(dy, y, xOrig, origAxes, permutedAxes) {
+ if (y.rank < xOrig.rank) {
+ y = y.reshape(expandShapeToKeepDim(y.shape, origAxes));
+ }
+ if (dy.rank < xOrig.rank) {
+ dy = dy.reshape(expandShapeToKeepDim(dy.shape, origAxes));
+ }
+ return {
+ x: () => {
+ const dx = dy.mul(xOrig.equal(y).cast(dy.dtype));
+ return permutedAxes == null ? dx : dx.transpose(permutedAxes);
+ },
+ };
+ }
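+ // A small worked example of the gradient above: for x = [1, 5, 3] reduced
+ // with max to y = 5, the mask xOrig.equal(y) is [0, 1, 0], so only the
+ // position that produced the extremum receives the upstream gradient dy;
+ // tied positions all receive it.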
+ //# sourceMappingURL=reduction_ops_util.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reduction_ops.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes log(sum(exp(elements across the reduction dimensions))).
+ *
+ * Reduces the input along the dimensions given in `axis`. Unless `keepDims`
+ * is true, the rank of the array is reduced by 1 for each entry in `axis`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axis` has no entries, all dimensions are reduced, and an array with a
+ * single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.logSumExp().print(); // or tf.logSumExp(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.logSumExp(axis).print(); // or tf.logSumExp(x, axis)
+ * ```
+ * @param x The input tensor.
+ * @param axis The dimension(s) to reduce. If null (the default),
+ * reduces all dimensions.
+ * @param keepDims If true, retains reduced dimensions with length
+ * of 1. Defaults to false.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function logSumExp_(x, axis = null, keepDims = false) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "logSumExp");
+ const axes = util["parseAxisParam"](axis, $x.shape);
+ const xMax = $x.max(axes, true /* keepDims */);
+ const a = $x.sub(xMax);
+ const b = a.exp();
+ const c = b.sum(axes);
+ const d = c.log();
+ const res = xMax.reshape(d.shape).add(d);
+ if (keepDims) {
+ const newShape = expandShapeToKeepDim(res.shape, axes);
+ return res.reshape(newShape);
+ }
+ return res;
+ }
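+ // Note on the implementation above: it uses the standard log-sum-exp
+ // stability trick,
+ //   log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x)))),
+ // so for x = [1, 2, 3] the result is 3 + log(e^-2 + e^-1 + e^0) ~= 3.4076,
+ // without ever exponentiating a large value.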
+ /**
+ * Computes the sum of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If axes has no entries, all dimensions are reduced, and a
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.sum().print(); // or tf.sum(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.sum(axis).print(); // or tf.sum(x, axis)
+ * ```
+ *
+ * @param x The input tensor to compute the sum over. If the dtype is `bool`
+ * it will be converted to `int32` and the output dtype will be `int32`.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function sum_(x, axis = null, keepDims = false) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sum");
+ if ($x.dtype === "bool") {
+ $x = $x.toInt();
+ }
+ const axes = util["parseAxisParam"](axis, $x.shape);
+ // Use a custom gradient to bypass 2 gradient backprops since sum is used
+ // extremely often.
+ const customOp = customGrad((x) => {
+ const permutation = getAxesPermutation(axes, x.rank);
+ let reductionAxes = axes;
+ let permutedX = x;
+ if (permutation != null) {
+ permutedX = x.transpose(permutation);
+ reductionAxes = getInnerMostAxes(reductionAxes.length, x.rank);
+ }
+ const gradFunc = (dy) => {
+ const expandedDyShape = x.shape.slice();
+ axes.forEach((axis) => {
+ expandedDyShape[axis] = 1;
+ });
+ const expandedDy = dy.reshape(expandedDyShape);
+ const derX = expandedDy.mul(Object(tensor_ops["b" /* ones */])(x.shape, "float32"));
+ return derX;
+ };
+ const gradInputs = (dy) => {
+ return { x: () => gradFunc(dy) };
+ };
+ const attrs = { axes: reductionAxes };
+ let value = engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.sum(permutedX, reductionAxes),
+ { x: permutedX },
+ gradInputs,
+ "Sum",
+ attrs
+ );
+ if (keepDims) {
+ const newShape = expandShapeToKeepDim(value.shape, axes);
+ value = value.reshape(newShape);
+ }
+ return { value, gradFunc };
+ });
+ return customOp($x);
+ }
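+ // Note on the custom gradient above: it simply broadcasts dy back over the
+ // reduced axes. For x of shape [2, 3] summed over axes = [1], dy has shape
+ // [2]; it is reshaped to [2, 1] and multiplied by ones([2, 3]), so every
+ // element of x receives the gradient of its row's sum.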
+ /**
+ * Computes the product of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and a
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.prod().print(); // or tf.prod(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.prod(axis).print(); // or tf.prod(x, axis)
+ * ```
+ *
+ * @param x The input tensor to compute the product over. If the dtype is `bool`
+ * it will be converted to `int32` and the output dtype will be `int32`.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function prod_(x, axis = null, keepDims = false) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "prod");
+ if ($x.dtype === "bool") {
+ $x = $x.toInt();
+ }
+ const axes = util["parseAxisParam"](axis, $x.shape);
+ const permutation = getAxesPermutation(axes, $x.rank);
+ let reductionAxes = axes;
+ let permutedX = $x;
+ if (permutation != null) {
+ permutedX = $x.transpose(permutation);
+ reductionAxes = getInnerMostAxes(reductionAxes.length, $x.rank);
+ }
+ let value = engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.prod(permutedX, reductionAxes),
+ { permutedX }
+ );
+ if (keepDims) {
+ const newShape = expandShapeToKeepDim(value.shape, axes);
+ value = value.reshape(newShape);
+ }
+ return value;
+ }
+ /**
+ * Computes the mean of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is
+ * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with
+ * a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.mean().print(); // or tf.mean(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.mean(axis).print(); // or tf.mean(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function mean_(x, axis = null, keepDims = false) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "mean");
+ const axes = util["parseAxisParam"](axis, $x.shape);
+ const shapes = computeOutAndReduceShapes($x.shape, axes);
+ const reduceShape = shapes[1];
+ const reduceSize = util["sizeFromShape"](reduceShape);
+ // Use a custom gradient to bypass 2 gradient backprops since mean is used
+ // extremely often.
+ const customOp = customGrad((x) => {
+ const reduceSizeScalar = Object(tensor_ops["e" /* scalar */])(reduceSize);
+ // Cast if needed.
+ const xReduce = reduceSizeScalar.dtype === x.dtype ? x : x.cast(reduceSizeScalar.dtype);
+ const res = xReduce.div(reduceSizeScalar);
+ const value = res.sum(axis, keepDims);
+ const gradFunc = (dy) => {
+ const expandedDyShape = x.shape.slice();
+ axes.forEach((axis) => {
+ expandedDyShape[axis] = 1;
+ });
+ const expandedDy = dy.reshape(expandedDyShape);
+ const derX = expandedDy
+ .mul(Object(tensor_ops["b" /* ones */])(x.shape, "float32"))
+ .div(reduceSize);
+ return derX;
+ };
+ return { value, gradFunc };
+ });
+ return customOp($x);
+ }
+ /**
+ * Computes the minimum value from the input.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the array is reduced by 1 for each entry in `axes`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axes` has no entries, all dimensions are reduced, and an array with a
+ * single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.min().print(); // or tf.min(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.min(axis).print(); // or tf.min(x, axis)
+ * ```
+ *
+ * @param x The input Tensor.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function min_(x, axis = null, keepDims = false) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "min");
+ const xOrig = $x;
+ const origAxes = util["parseAxisParam"](axis, $x.shape);
+ let axes = origAxes;
+ const permutedAxes = getAxesPermutation(axes, $x.rank);
+ if (permutedAxes != null) {
+ $x = $x.transpose(permutedAxes);
+ axes = getInnerMostAxes(axes.length, $x.rank);
+ }
+ const grad = (dy, saved) =>
+ gradForMinAndMax(dy, saved[1], saved[0], origAxes, permutedAxes);
+ const inputsToSave = [$x];
+ const outputsToSave = [true];
+ let res = engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const y = backend.min($x, axes);
+ save([xOrig, y]);
+ return y;
+ },
+ { x: $x },
+ grad,
+ "Min",
+ { axes },
+ inputsToSave,
+ outputsToSave
+ );
+ if (keepDims) {
+ const newShape = expandShapeToKeepDim(res.shape, origAxes);
+ res = res.reshape(newShape);
+ }
+ return res;
+ }
+ /**
+ * Returns the indices of the minimum values along an `axis`.
+ *
+ * The result has the same shape as `input` with the dimension along `axis`
+ * removed.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.argMin().print(); // or tf.argMin(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
+ *
+ * const axis = 1;
+ * x.argMin(axis).print(); // or tf.argMin(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
+ *
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function argMin_(x, axis = 0) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "argMin");
+ if (axis == null) {
+ axis = 0;
+ }
+ let axes = util["parseAxisParam"](axis, $x.shape);
+ const permutedAxes = getAxesPermutation(axes, $x.rank);
+ if (permutedAxes != null) {
+ $x = $x.transpose(permutedAxes);
+ axes = getInnerMostAxes(axes.length, $x.rank);
+ }
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])($x) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.argMin($x, axes[0]);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Returns the indices of the maximum values along an `axis`.
+ *
+ * The result has the same shape as `input` with the dimension along `axis`
+ * removed.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.argMax().print(); // or tf.argMax(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
+ *
+ * const axis = 1;
+ * x.argMax(axis).print(); // or tf.argMax(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function argMax_(x, axis = 0) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "argMax");
+ if (axis == null) {
+ axis = 0;
+ }
+ let axes = util["parseAxisParam"](axis, $x.shape);
+ const permutedAxes = getAxesPermutation(axes, $x.rank);
+ if (permutedAxes != null) {
+ $x = $x.transpose(permutedAxes);
+ axes = getInnerMostAxes(axes.length, $x.rank);
+ }
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => Object(tensor_ops["o" /* zerosLike */])($x) };
+ };
+ const attrs = { axis: axes[0] };
+ const inputsToSave = [$x];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.argMax($x, axes[0]);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "ArgMax",
+ attrs,
+ inputsToSave
+ );
+ }
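+ // Note on the gradients above: argMin_ and argMax_ are not differentiable
+ // with respect to their input, so their registered gradient functions simply
+ // return zerosLike(x).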
+ /**
+ * Computes the logical and of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 1, 1], 'bool');
+ *
+ * x.all().print(); // or tf.all(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
+ *
+ * const axis = 1;
+ * x.all(axis).print(); // or tf.all(x, axis)
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype bool.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function all_(x, axis = null, keepDims = false) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "all", "bool");
+ const origAxes = util["parseAxisParam"](axis, $x.shape);
+ let axes = origAxes;
+ const permutedAxes = getAxesPermutation(axes, $x.rank);
+ if (permutedAxes != null) {
+ $x = $x.transpose(permutedAxes);
+ axes = getInnerMostAxes(axes.length, $x.rank);
+ }
+ const res = engine["a" /* ENGINE */].runKernelFunc((backend) => backend.all($x, axes), {
+ $x,
+ });
+ if (keepDims) {
+ const newShape = expandShapeToKeepDim(res.shape, origAxes);
+ return res.reshape(newShape);
+ }
+ return res;
+ }
+ /**
+ * Computes the logical or of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 1, 1], 'bool');
+ *
+ * x.any().print(); // or tf.any(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
+ *
+ * const axis = 1;
+ * x.any(axis).print(); // or tf.any(x, axis)
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype bool.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Reduction'} */
+ function any_(x, axis = null, keepDims = false) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "any", "bool");
+ const origAxes = util["parseAxisParam"](axis, $x.shape);
+ let axes = origAxes;
+ const permutedAxes = getAxesPermutation(axes, $x.rank);
+ if (permutedAxes != null) {
+ $x = $x.transpose(permutedAxes);
+ axes = getInnerMostAxes(axes.length, $x.rank);
+ }
+ const res = engine["a" /* ENGINE */].runKernelFunc((backend) => backend.any($x, axes), {
+ $x,
+ });
+ if (keepDims) {
+ const newShape = expandShapeToKeepDim(res.shape, origAxes);
+ return res.reshape(newShape);
+ }
+ return res;
+ }
+ /**
+ * Calculates the mean and variance of `x`. The mean and variance are
+ * calculated by aggregating the contents of `x` across `axes`. If `x` is
+ * 1-D and `axes = [0]` this is just the mean and variance of a vector.
+ *
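+ * A minimal usage sketch (assuming the op is exposed as `tf.moments`, like
+ * the other examples in this bundle):
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const {mean, variance} = tf.moments(x);
+ * mean.print();     // 2.5
+ * variance.print(); // 1.25
+ * ```
+ *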
+ * @param x The input tensor.
+ * @param axis The dimension(s) along which to compute the mean and
+ * variance. By default it reduces all dimensions.
+ * @param keepDims If true, the moments have the same dimensionality as the
+ * input.
+ * @return An object with two keys: `mean` and `variance`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Normalization'} */
+ function moments_(x, axis = null, keepDims = false) {
+ x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "moments");
+ const axes = util["parseAxisParam"](axis, x.shape);
+ const mean = x.mean(axes, keepDims);
+ let keepDimsShape = mean.shape;
+ if (!keepDims) {
+ keepDimsShape = expandShapeToKeepDim(mean.shape, axes);
+ }
+ const devSquared = x.toFloat().sub(mean.reshape(keepDimsShape)).square();
+ const variance = devSquared.mean(axes, keepDims);
+ return { mean, variance };
+ }
+ const reduction_ops_all = Object(operation["a" /* op */])({ all_ });
+ // tslint:disable-next-line:variable-name
+ const any = Object(operation["a" /* op */])({ any_ });
+ const argMax = Object(operation["a" /* op */])({ argMax_ });
+ const argMin = Object(operation["a" /* op */])({ argMin_ });
+ const logSumExp = Object(operation["a" /* op */])({ logSumExp_ });
+ const reduction_ops_mean = Object(operation["a" /* op */])({ mean_ });
+ const reduction_ops_min = Object(operation["a" /* op */])({ min_ });
+ const moments = Object(operation["a" /* op */])({ moments_ });
+ const sum = Object(operation["a" /* op */])({ sum_ });
+ const reduction_ops_prod = Object(operation["a" /* op */])({ prod_ });
+ //# sourceMappingURL=reduction_ops.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/square.js
+ /**
+ * @license
+ * Copyright 2019 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes square of `x` element-wise: `x ^ 2`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);
+ *
+ * x.square().print(); // or tf.square(x)
+ * ```
+ * @param x The input Tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function square_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "square");
+ const attrs = {};
+ const inputsToSave = [$x];
+ const outputsToSave = [];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ save([$x]);
+ return backend.square($x);
+ },
+ { x: $x },
+ null /* grad */,
+ "Square",
+ attrs,
+ inputsToSave,
+ outputsToSave
+ );
+ }
+ const square = Object(operation["a" /* op */])({ square_ });
+ //# sourceMappingURL=square.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/unary_ops.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes `-1 * x` element-wise.
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);
+ *
+ * x.neg().print(); // or tf.neg(x)
+ * ```
+ *
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function neg_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "neg");
+ const grad = (dy) => {
+ return { x: () => dy.neg() };
+ };
+ const attrs = {};
+ const inputsToSave = [$x];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.neg($x),
+ { x: $x },
+ grad,
+ "Neg",
+ attrs,
+ inputsToSave
+ );
+ }
+ /**
+ * Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.ceil().print(); // or tf.ceil(x)
+ * ```
+ * @param x The input Tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function ceil_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "ceil");
+ // TODO(manrajgrover): Return null for gradients when backprop supports it.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.ceil($x), { $x }, grad);
+ }
+ /**
+ * Computes floor of input `tf.Tensor` element-wise: `floor(x)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.floor().print(); // or tf.floor(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function floor_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "floor");
+ // TODO(nsthorat): Let gradients be null for cases where we want to stop
+ // backpropagation.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.floor($x), { $x }, grad);
+ }
+ /**
+ * Returns an element-wise indication of the sign of a number.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3, NaN, 0]);
+ *
+ * x.sign().print(); // or tf.sign(x)
+ * ```
+ * @param x The input Tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function sign_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sign");
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.sign($x), { $x }, grad);
+ }
+ /**
+ * Returns which elements of x are NaN.
+ *
+ * ```js
+ * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
+ *
+ * x.isNaN().print(); // or tf.isNaN(x)
+ * ```
+ * @param x The input Tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function isNaN_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "isNaN");
+ // TODO(nsthorat): Let gradients be null for cases where we want to stop
+ // backpropagation.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.isNaN($x), { $x }, grad);
+ }
+ /**
+ * Returns which elements of x are Infinity or -Infinity.
+ *
+ * ```js
+ * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
+ *
+ * x.isInf().print(); // or tf.isInf(x)
+ * ```
+ * @param x The input Tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function isInf_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "isInf");
+ // TODO(nsthorat): Let gradients be null for cases where we want to stop
+ // backpropagation.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.isInf($x), { $x }, grad);
+ }
+ /**
+ * Returns which elements of x are finite.
+ *
+ * ```js
+ * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
+ *
+ * x.isFinite().print(); // or tf.isFinite(x)
+ * ```
+ * @param x The input Tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function isFinite_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "isFinite");
+ // TODO(nsthorat): Let gradients be null for cases where we want to stop
+ // backpropagation.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.isFinite($x),
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes round of input `tf.Tensor` element-wise: `round(x)`.
+ * It implements banker's rounding.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.round().print(); // or tf.round(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function round_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "round");
+ // TODO(nsthorat): Let gradients be null for cases where we want to stop
+ // backpropagation.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.round($x), { $x }, grad);
+ }
+ /**
+ * Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, -3]);
+ *
+ * x.exp().print(); // or tf.exp(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function exp_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "exp");
+ const bck = (dy, saved) => {
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return { x: () => dy.mul(saved[0]) };
+ };
+ const attrs = {};
+ const inputsToSave = [];
+ const outputsToSave = [true];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const y = backend.exp($x);
+ save([y]);
+ return y;
+ },
+ { x: $x },
+ bck,
+ "Exp",
+ attrs,
+ inputsToSave,
+ outputsToSave
+ );
+ }
+ /**
+ * Computes exponential of the input `tf.Tensor` minus one element-wise.
+ * `e ^ x - 1`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, -3]);
+ *
+ * x.expm1().print(); // or tf.expm1(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function expm1_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "expm1");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.mul($x.exp()) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.expm1($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes natural logarithm of the input `tf.Tensor` element-wise: `ln(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.E]);
+ *
+ * x.log().print(); // or tf.log(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function log_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "log");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => dy.div($x.toFloat()) };
+ };
+ const attrs = {};
+ const inputsToSave = [$x];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.log($x);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "Log",
+ attrs,
+ inputsToSave
+ );
+ }
+ /**
+ * Computes natural logarithm of the input `tf.Tensor` plus one
+ * element-wise: `ln(1 + x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.E - 1]);
+ *
+ * x.log1p().print(); // or tf.log1p(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function log1p_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "log1p");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.div($x.add(1)) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.log1p($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 4, -1]);
+ *
+ * x.sqrt().print(); // or tf.sqrt(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function sqrt_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sqrt");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => dy.div($x.toFloat().sqrt().mul(2)) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.sqrt($x);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "Sqrt",
+ {}
+ );
+ }
+ /**
+ * Computes reciprocal of square root of the input `tf.Tensor` element-wise:
+ * `y = 1 / sqrt(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 4, -1]);
+ *
+ * x.rsqrt().print(); // or tf.rsqrt(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function rsqrt_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "rsqrt");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => dy.div($x.pow(1.5).mul(2)).neg() };
+ };
+ const inputsToSave = [$x];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.rsqrt($x);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "Rsqrt",
+ {} /* attrs */,
+ inputsToSave
+ );
+ }
+ /**
+ * Computes reciprocal of x element-wise: `1 / x`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, 2]);
+ *
+ * x.reciprocal().print(); // or tf.reciprocal(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function reciprocal_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "reciprocal");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.div($x.square().neg()) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.reciprocal($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes absolute value element-wise: `abs(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.abs().print(); // or tf.abs(x)
+ * ```
+ * @param x The input `tf.Tensor`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function abs_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "abs");
+ if ($x.dtype === "complex64") {
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.complexAbs($x), {
+ $x,
+ });
+ }
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => dy.mul($x.toFloat().step(-1)) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.abs($x);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "Abs"
+ );
+ }
+ /**
+ * Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
+ * ```
+ * @param x The input tensor.
+ * @param clipValueMin Lower-bound of range to be clipped to.
+ * @param clipValueMax Upper-bound of range to be clipped to.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function clipByValue_(x, clipValueMin, clipValueMax) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "clipByValue");
+ util["assert"](
+ clipValueMin <= clipValueMax,
+ () =>
+ `Error in clip: min (${clipValueMin}) must be ` +
+ `less than or equal to max (${clipValueMax}).`
+ );
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ x: () =>
+ dy.where(
+ $x.greaterEqual(clipValueMin).logicalAnd($x.lessEqual(clipValueMax)),
+ Object(tensor_ops["o" /* zerosLike */])(dy)
+ ),
+ };
+ };
+ const inputsToSave = [$x];
+ const attr = { min: clipValueMin, max: clipValueMax };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.clip($x, clipValueMin, clipValueMax);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "ClipByValue",
+ attr,
+ inputsToSave
+ );
+ }
+ /**
+ * Computes sigmoid element-wise, `1 / (1 + exp(-x))`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, -1, 2, -3]);
+ *
+ * x.sigmoid().print(); // or tf.sigmoid(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function sigmoid_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sigmoid");
+ const grad = (dy, saved) => {
+ const [y] = saved;
+ return { x: () => dy.mul(y.mul(Object(tensor_ops["e" /* scalar */])(1).sub(y))) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const y = backend.sigmoid($x);
+ save([y]);
+ return y;
+ },
+ { x: $x },
+ grad,
+ "Sigmoid"
+ );
+ }
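+ // Note (editorial, not part of the upstream library): unlike most of the
+ // unary ops above, sigmoid saves its *output* y rather than its input,
+ // because the derivative can be written in terms of the output alone:
+ //   d/dx sigmoid(x) = y * (1 - y), with y = sigmoid(x).
+ // tanh_ below uses the same trick via outputsToSave, since
+ //   d/dx tanh(x) = 1 - tanh(x)^2.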
+ /**
+ * Computes log sigmoid of the input `tf.Tensor` element-wise:
+ * `logSigmoid(x)`. For numerical stability, we use `-tf.softplus(-x)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.logSigmoid().print(); // or tf.logSigmoid(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function logSigmoid_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "logSigmoid");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.mul($x.neg().sigmoid()) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.softplus($x.neg()).neg();
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes softplus of the input `tf.Tensor` element-wise: `log(exp(x) + 1)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.softplus().print(); // or tf.softplus(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function softplus_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "softplus");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.mul($x.sigmoid()) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.softplus($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes sin of the input Tensor element-wise: `sin(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.sin().print(); // or tf.sin(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function sin_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sin");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => $x.toFloat().cos().mul(dy) };
+ };
+ const inputsToSave = [$x];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.sin($x);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "Sin",
+ {} /* attrs */,
+ inputsToSave
+ );
+ }
+ /**
+ * Computes cos of the input `tf.Tensor` element-wise: `cos(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.cos().print(); // or tf.cos(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function cos_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cos");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { x: () => $x.toFloat().sin().neg().mul(dy) };
+ };
+ const inputsToSave = [$x];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.cos($x);
+ save([$x]);
+ return res;
+ },
+ { x: $x },
+ grad,
+ "Cos",
+ {} /* attrs */,
+ inputsToSave
+ );
+ }
+ /**
+ * Computes tan of the input `tf.Tensor` element-wise: `tan(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.tan().print(); // or tf.tan(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function tan_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tan");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.div($x.cos().square()) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.tan($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes asin of the input `tf.Tensor` element-wise: `asin(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.asin().print(); // or tf.asin(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function asin_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "asin");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ $x: () =>
+ dy.div(Object(tensor_ops["e" /* scalar */])(1).sub($x.toFloat().square()).sqrt()),
+ };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.asin($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes acos of the input `tf.Tensor` element-wise: `acos(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.acos().print(); // or tf.acos(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function acos_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "acos");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ $x: () => {
+ const a = $x.toFloat().square();
+ const b = Object(tensor_ops["e" /* scalar */])(1).sub(a).sqrt();
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return dy.div(b).neg();
+ },
+ };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.acos($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes atan of the input `tf.Tensor` element-wise: `atan(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.atan().print(); // or tf.atan(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function atan_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "atan");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return { $x: () => dy.div($x.toFloat().square().add(1)) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.atan($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes hyperbolic sin of the input `tf.Tensor` element-wise: `sinh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.sinh().print(); // or tf.sinh(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function sinh_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sinh");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return { $x: () => $x.toFloat().cosh().mul(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.sinh($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes hyperbolic cos of the input `tf.Tensor` element-wise: `cosh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.cosh().print(); // or tf.cosh(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function cosh_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cosh");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return { $x: () => $x.toFloat().sinh().mul(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.cosh($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes hyperbolic tangent of the input `tf.Tensor` element-wise: `tanh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, 70]);
+ *
+ * x.tanh().print(); // or tf.tanh(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function tanh_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tanh");
+ const grad = (dy, saved) => {
+ const [y] = saved;
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return { x: () => Object(tensor_ops["e" /* scalar */])(1).sub(y.square()).mul(dy) };
+ };
+ const outputsToSave = [true];
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const y = backend.tanh($x);
+ save([y]);
+ return y;
+ },
+ { x: $x },
+ grad,
+ "Tanh",
+ {} /* attrs */,
+ null /* inputsToSave */,
+ outputsToSave
+ );
+ }
+ /**
+ * Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise:
+ * `asinh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.asinh().print(); // or tf.asinh(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function asinh_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "asinh");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ $x: () => {
+ const a = Object(tensor_ops["e" /* scalar */])(1).add($x.toFloat().square()).sqrt();
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return dy.div(a);
+ },
+ };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.asinh($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise:
+ * `acosh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([10, 1, 3, 5.7]);
+ *
+ * x.acosh().print(); // or tf.acosh(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function acosh_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "acosh");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ $x: () => {
+ const a = $x.toFloat().square().sub(1).sqrt();
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return dy.div(a);
+ },
+ };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.acosh($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise:
+ * `atanh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, .1, -.1, .7]);
+ *
+ * x.atanh().print(); // or tf.atanh(x)
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function atanh_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "atanh");
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ $x: () => dy.div(Object(tensor_ops["e" /* scalar */])(1).sub($x.toFloat().square())),
+ };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.atanh($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
+ /**
+ * Computes the Gauss error function of the input `tf.Tensor` element-wise:
+ * `erf(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, .1, -.1, .7]);
+ *
+ * x.erf().print(); // or tf.erf(x);
+ * ```
+ * @param x The input tensor.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function erf_(x) {
+ let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "erf");
+ util["assert"](
+ $x.dtype === "int32" || $x.dtype === "float32",
+ () => "Input dtype must be `int32` or `float32`."
+ );
+ if ($x.dtype === "int32") {
+ $x = $x.toFloat();
+ }
+ const grad = (dy, saved) => {
+ const [$x] = saved;
+ return {
+ $x: () =>
+ dy.mul(
+ $x
+ .square()
+ .neg()
+ .exp()
+ .mul(2 / Math.sqrt(Math.PI))
+ ),
+ };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.erf($x);
+ save([$x]);
+ return res;
+ },
+ { $x },
+ grad
+ );
+ }
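+ // Note (editorial): the gradient above is the standard identity
+ //   d/dx erf(x) = (2 / sqrt(pi)) * exp(-x^2),
+ // evaluated on the (float-cast) saved input.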
+ /**
+ * Computes step of the input `tf.Tensor` element-wise: `x > 0 ? 1 : alpha * x`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 2, -1, -3]);
+ *
+ * x.step(.5).print(); // or tf.step(x, .5)
+ * ```
+ * @param x The input tensor.
+ * @param alpha The gradient when input is negative.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Basic math'} */
+ function step_(x, alpha = 0.0) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "step");
+ // TODO(manrajgrover): Return null for gradients when backprop supports
+ // it.
+ const grad = (dy) => {
+ return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.step($x, alpha),
+ { $x },
+ grad
+ );
+ }
+ const abs = Object(operation["a" /* op */])({ abs_ });
+ const acos = Object(operation["a" /* op */])({ acos_ });
+ const acosh = Object(operation["a" /* op */])({ acosh_ });
+ const asin = Object(operation["a" /* op */])({ asin_ });
+ const asinh = Object(operation["a" /* op */])({ asinh_ });
+ const atan = Object(operation["a" /* op */])({ atan_ });
+ const atanh = Object(operation["a" /* op */])({ atanh_ });
+ const ceil = Object(operation["a" /* op */])({ ceil_ });
+ const clipByValue = Object(operation["a" /* op */])({ clipByValue_ });
+ const cos = Object(operation["a" /* op */])({ cos_ });
+ const cosh = Object(operation["a" /* op */])({ cosh_ });
+ const erf = Object(operation["a" /* op */])({ erf_ });
+ const unary_ops_exp = Object(operation["a" /* op */])({ exp_ });
+ const expm1 = Object(operation["a" /* op */])({ expm1_ });
+ const floor = Object(operation["a" /* op */])({ floor_ });
+ const log = Object(operation["a" /* op */])({ log_ });
+ const log1p = Object(operation["a" /* op */])({ log1p_ });
+ const logSigmoid = Object(operation["a" /* op */])({ logSigmoid_ });
+ const neg = Object(operation["a" /* op */])({ neg_ });
+ const reciprocal = Object(operation["a" /* op */])({ reciprocal_ });
+ const round = Object(operation["a" /* op */])({ round_ });
+ const rsqrt = Object(operation["a" /* op */])({ rsqrt_ });
+ const sigmoid = Object(operation["a" /* op */])({ sigmoid_ });
+ const sign = Object(operation["a" /* op */])({ sign_ });
+ const unary_ops_isNaN = Object(operation["a" /* op */])({ isNaN_ });
+ const isInf = Object(operation["a" /* op */])({ isInf_ });
+ const unary_ops_isFinite = Object(operation["a" /* op */])({ isFinite_ });
+ const sin = Object(operation["a" /* op */])({ sin_ });
+ const sinh = Object(operation["a" /* op */])({ sinh_ });
+ const softplus = Object(operation["a" /* op */])({ softplus_ });
+ const sqrt = Object(operation["a" /* op */])({ sqrt_ });
+ const unary_ops_step = Object(operation["a" /* op */])({ step_ });
+ const tan = Object(operation["a" /* op */])({ tan_ });
+ const tanh = Object(operation["a" /* op */])({ tanh_ });
+ //# sourceMappingURL=unary_ops.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Atan2_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const atan2GradConfig = {
+ kernelName: kernel_names["c" /* Atan2 */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ const d = add(square(a), square(b));
+ let res = mul(dy, div(b, d));
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(res, a.shape);
+ };
+ const derB = () => {
+ const d = add(square(a), square(b));
+ let res = neg(mul(dy, div(a, d)));
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(res, b.shape);
+ };
+ return { a: derA, b: derB };
+ },
+ };
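+ // Note (editorial): the derivatives used above follow from
+ //   d/da atan2(a, b) =  b / (a^2 + b^2)
+ //   d/db atan2(a, b) = -a / (a^2 + b^2)
+ // and, because a and b may have been broadcast to a common shape, each
+ // partial is summed over the broadcast axes and reshaped back to the
+ // original input shape before being returned.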
+ //# sourceMappingURL=Atan2_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js
+ /**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ function computePool2DInfo(
+ inShape,
+ filterSize,
+ strides,
+ dilations,
+ pad,
+ roundingMode,
+ dataFormat = "channelsLast"
+ ) {
+ const [filterHeight, filterWidth] = parseTupleParam(filterSize);
+ let filterShape;
+ if (dataFormat === "channelsLast") {
+ filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]];
+ } else if (dataFormat === "channelsFirst") {
+ filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]];
+ } else {
+ throw new Error(`Unknown dataFormat ${dataFormat}`);
+ }
+ return computeConv2DInfo(
+ inShape,
+ filterShape,
+ strides,
+ dilations,
+ pad,
+ roundingMode,
+ false,
+ dataFormat
+ );
+ }
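+ // Worked example (illustrative only): pooling a 4x4 single-channel input
+ // with a 2x2 filter, stride 2 and 'same' padding,
+ //   computePool2DInfo([1, 4, 4, 1], 2, 2, 1, 'same')
+ // yields outHeight === 2 and outWidth === 2 (ceil(4 / 2)), with zero
+ // padding on every side since the filter tiles the input exactly.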
+ /**
+ * Computes the information for a forward pass of a pooling3D operation.
+ */
+ function computePool3DInfo(
+ inShape,
+ filterSize,
+ strides,
+ dilations,
+ pad,
+ roundingMode,
+ dataFormat = "NDHWC"
+ ) {
+ const [filterDepth, filterHeight, filterWidth] = parse3TupleParam(filterSize);
+ let filterShape;
+ let $dataFormat;
+ if (dataFormat === "NDHWC") {
+ $dataFormat = "channelsLast";
+ filterShape = [filterDepth, filterHeight, filterWidth, inShape[4], inShape[4]];
+ } else if (dataFormat === "NCDHW") {
+ $dataFormat = "channelsFirst";
+ filterShape = [filterDepth, filterHeight, filterWidth, inShape[1], inShape[1]];
+ } else {
+ throw new Error(`Unknown dataFormat ${dataFormat}`);
+ }
+ return computeConv3DInfo(
+ inShape,
+ filterShape,
+ strides,
+ dilations,
+ pad,
+ false,
+ $dataFormat,
+ roundingMode
+ );
+ }
+ /**
+ * Computes the information for a forward pass of a convolution/pooling
+ * operation.
+ */
+ function computeConv2DInfo(
+ inShape,
+ filterShape,
+ strides,
+ dilations,
+ pad,
+ roundingMode,
+ depthwise = false,
+ dataFormat = "channelsLast"
+ ) {
+ let [batchSize, inHeight, inWidth, inChannels] = [-1, -1, -1, -1];
+ if (dataFormat === "channelsLast") {
+ [batchSize, inHeight, inWidth, inChannels] = inShape;
+ } else if (dataFormat === "channelsFirst") {
+ [batchSize, inChannels, inHeight, inWidth] = inShape;
+ } else {
+ throw new Error(`Unknown dataFormat ${dataFormat}`);
+ }
+ const [filterHeight, filterWidth, , filterChannels] = filterShape;
+ const [strideHeight, strideWidth] = parseTupleParam(strides);
+ const [dilationHeight, dilationWidth] = parseTupleParam(dilations);
+ const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
+ const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
+ const { padInfo, outHeight, outWidth } = getPadAndOutInfo(
+ pad,
+ inHeight,
+ inWidth,
+ strideHeight,
+ strideWidth,
+ effectiveFilterHeight,
+ effectiveFilterWidth,
+ roundingMode,
+ dataFormat
+ );
+ const outChannels = depthwise ? filterChannels * inChannels : filterChannels;
+ let outShape;
+ if (dataFormat === "channelsFirst") {
+ outShape = [batchSize, outChannels, outHeight, outWidth];
+ } else if (dataFormat === "channelsLast") {
+ outShape = [batchSize, outHeight, outWidth, outChannels];
+ }
+ return {
+ batchSize,
+ dataFormat,
+ inHeight,
+ inWidth,
+ inChannels,
+ outHeight,
+ outWidth,
+ outChannels,
+ padInfo,
+ strideHeight,
+ strideWidth,
+ filterHeight,
+ filterWidth,
+ effectiveFilterHeight,
+ effectiveFilterWidth,
+ dilationHeight,
+ dilationWidth,
+ inShape,
+ outShape,
+ filterShape,
+ };
+ }
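+ // Worked example (illustrative only): a 5x5 RGB input convolved with a
+ // 2x2 filter producing 8 output channels, stride 1, 'valid' padding,
+ //   computeConv2DInfo([1, 5, 5, 3], [2, 2, 3, 8], 1, 1, 'valid')
+ // gives outHeight === outWidth === 4 (ceil((5 - 2 + 1) / 1)) and
+ // outShape === [1, 4, 4, 8]; with depthwise === true the output channel
+ // count would instead be filterChannels * inChannels.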
+ /**
+ * Computes the information for a forward pass of a 3D convolution/pooling
+ * operation.
+ */
+ function computeConv3DInfo(
+ inShape,
+ filterShape,
+ strides,
+ dilations,
+ pad,
+ depthwise = false,
+ dataFormat = "channelsLast",
+ roundingMode
+ ) {
+ let [batchSize, inDepth, inHeight, inWidth, inChannels] = [-1, -1, -1, -1, -1];
+ if (dataFormat === "channelsLast") {
+ [batchSize, inDepth, inHeight, inWidth, inChannels] = inShape;
+ } else if (dataFormat === "channelsFirst") {
+ [batchSize, inChannels, inDepth, inHeight, inWidth] = inShape;
+ } else {
+ throw new Error(`Unknown dataFormat ${dataFormat}`);
+ }
+ const [filterDepth, filterHeight, filterWidth, , filterChannels] = filterShape;
+ const [strideDepth, strideHeight, strideWidth] = parse3TupleParam(strides);
+ const [dilationDepth, dilationHeight, dilationWidth] = parse3TupleParam(dilations);
+ const effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth);
+ const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
+ const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
+ const { padInfo, outDepth, outHeight, outWidth } = get3DPadAndOutInfo(
+ pad,
+ inDepth,
+ inHeight,
+ inWidth,
+ strideDepth,
+ strideHeight,
+ strideWidth,
+ effectiveFilterDepth,
+ effectiveFilterHeight,
+ effectiveFilterWidth,
+ roundingMode
+ );
+ const outChannels = depthwise ? filterChannels * inChannels : filterChannels;
+ let outShape;
+ if (dataFormat === "channelsFirst") {
+ outShape = [batchSize, outChannels, outDepth, outHeight, outWidth];
+ } else if (dataFormat === "channelsLast") {
+ outShape = [batchSize, outDepth, outHeight, outWidth, outChannels];
+ }
+ return {
+ batchSize,
+ dataFormat,
+ inDepth,
+ inHeight,
+ inWidth,
+ inChannels,
+ outDepth,
+ outHeight,
+ outWidth,
+ outChannels,
+ padInfo,
+ strideDepth,
+ strideHeight,
+ strideWidth,
+ filterDepth,
+ filterHeight,
+ filterWidth,
+ effectiveFilterDepth,
+ effectiveFilterHeight,
+ effectiveFilterWidth,
+ dilationDepth,
+ dilationHeight,
+ dilationWidth,
+ inShape,
+ outShape,
+ filterShape,
+ };
+ }
+ function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) {
+ if (zeroPad == null) {
+ zeroPad = computeDefaultPad(inShape, fieldSize, stride);
+ }
+ const inputRows = inShape[0];
+ const inputCols = inShape[1];
+ const outputRows = conditionalRound(
+ (inputRows - fieldSize + 2 * zeroPad) / stride + 1,
+ roundingMode
+ );
+ util["assert"](
+ util["isInt"](outputRows),
+ () =>
+ `The output # of rows (${outputRows}) must be an integer. ` +
+ `Change the stride and/or zero pad parameters`
+ );
+ const outputCols = conditionalRound(
+ (inputCols - fieldSize + 2 * zeroPad) / stride + 1,
+ roundingMode
+ );
+ util["assert"](
+ util["isInt"](outputCols),
+ () =>
+ `The output # of columns (${outputCols}) must be an integer. ` +
+ `Change the stride and/or zero pad parameters`
+ );
+ return [outputRows, outputCols];
+ }
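+ // Worked example (illustrative only): the computation above is the usual
+ //   out = (in - fieldSize + 2 * zeroPad) / stride + 1,
+ // so a 5x5 input with a 3x3 field, stride 1 and zero pad 1 keeps its size:
+ //   computeOutputShape2D([5, 5], 3, 1, 1)  // -> [5, 5]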
+ function computeOutputShape4D(
+ inShape,
+ fieldSize,
+ outChannels,
+ stride,
+ zeroPad,
+ roundingMode
+ ) {
+ if (zeroPad == null) {
+ zeroPad = computeDefaultPad(inShape, fieldSize, stride);
+ }
+ const inputDepth = inShape[0];
+ const inputRows = inShape[1];
+ const inputCols = inShape[2];
+ const outputDepths = conditionalRound(
+ (inputDepth - fieldSize + 2 * zeroPad) / stride + 1,
+ roundingMode
+ );
+ util["assert"](
+ util["isInt"](outputDepths),
+ () =>
+ `The output # of depths (${outputDepths}) must be an integer. ` +
+ `Change the stride and/or zero pad parameters`
+ );
+ const outputRows = conditionalRound(
+ (inputRows - fieldSize + 2 * zeroPad) / stride + 1,
+ roundingMode
+ );
+ util["assert"](
+ util["isInt"](outputRows),
+ () =>
+ `The output # of rows (${outputRows}) must be an integer. ` +
+ `Change the stride and/or zero pad parameters`
+ );
+ const outputCols = conditionalRound(
+ (inputCols - fieldSize + 2 * zeroPad) / stride + 1,
+ roundingMode
+ );
+ util["assert"](
+ util["isInt"](outputCols),
+ () =>
+ `The output # of columns (${outputCols}) must be an integer. ` +
+ `Change the stride and/or zero pad parameters`
+ );
+ return [outputDepths, outputRows, outputCols, outChannels];
+ }
+ function computeDefaultPad(inputShape, fieldSize, stride, dilation = 1) {
+ const effectiveFieldSize = getEffectiveFilterSize(fieldSize, dilation);
+ return Math.floor((inputShape[0] * (stride - 1) - stride + effectiveFieldSize) / 2);
+ }
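+ // Worked example (illustrative only): for a 5x5 input, a 3x3 field and
+ // stride 1, computeDefaultPad([5, 5], 3, 1) returns
+ //   Math.floor((5 * 0 - 1 + 3) / 2) === 1,
+ // i.e. the one-pixel border needed for a 'same'-sized output.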
+ function parseTupleParam(param) {
+ if (typeof param === "number") {
+ return [param, param, param];
+ }
+ if (param.length === 2) {
+ return [param[0], param[1], 1];
+ }
+ return param;
+ }
+ function parse3TupleParam(param) {
+ return typeof param === "number" ? [param, param, param] : param;
+ }
+ /* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d
+ * Atrous convolution is equivalent to standard convolution with upsampled
+ * filters with effective_filter_height =
+ * filter_height + (filter_height - 1) * (dilation - 1)
+ * and effective_filter_width =
+ * filter_width + (filter_width - 1) * (dilation - 1),
+ * produced by inserting dilation - 1 zeros along consecutive elements across
+ * the filters' spatial dimensions.
+ * When there is a dilation, this converts a filter dimension to the
+ * effective filter dimension, so it can be used in a standard convolution.
+ */
+ function getEffectiveFilterSize(filterSize, dilation) {
+ if (dilation <= 1) {
+ return filterSize;
+ }
+ return filterSize + (filterSize - 1) * (dilation - 1);
+ }
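+ // Worked example (illustrative only): a 3-tap filter with dilation 2 has
+ //   getEffectiveFilterSize(3, 2) === 3 + (3 - 1) * (2 - 1) === 5,
+ // i.e. it covers the same span as a dense 5-tap filter.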
+ function getPadAndOutInfo(
+ pad,
+ inHeight,
+ inWidth,
+ strideHeight,
+ strideWidth,
+ filterHeight,
+ filterWidth,
+ roundingMode,
+ dataFormat
+ ) {
+ let padInfo;
+ let outHeight;
+ let outWidth;
+ if (typeof pad === "number") {
+ const padType = pad === 0 ? "VALID" : "NUMBER";
+ padInfo = { top: pad, bottom: pad, left: pad, right: pad, type: padType };
+ const outShape = computeOutputShape2D(
+ [inHeight, inWidth],
+ filterHeight,
+ strideHeight,
+ pad,
+ roundingMode
+ );
+ outHeight = outShape[0];
+ outWidth = outShape[1];
+ } else if (pad === "same") {
+ outHeight = Math.ceil(inHeight / strideHeight);
+ outWidth = Math.ceil(inWidth / strideWidth);
+ const padAlongHeight = Math.max(
+ 0,
+ (outHeight - 1) * strideHeight + filterHeight - inHeight
+ );
+ const padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);
+ const top = Math.floor(padAlongHeight / 2);
+ const bottom = padAlongHeight - top;
+ const left = Math.floor(padAlongWidth / 2);
+ const right = padAlongWidth - left;
+ padInfo = { top, bottom, left, right, type: "SAME" };
+ } else if (pad === "valid") {
+ padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: "VALID" };
+ outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
+ outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
+ } else if (typeof pad === "object") {
+ const top = dataFormat === "channelsLast" ? pad[1][0] : pad[2][0];
+ const bottom = dataFormat === "channelsLast" ? pad[1][1] : pad[2][1];
+ const left = dataFormat === "channelsLast" ? pad[2][0] : pad[3][0];
+ const right = dataFormat === "channelsLast" ? pad[2][1] : pad[3][1];
+ const padType =
+ top === 0 && bottom === 0 && left === 0 && right === 0 ? "VALID" : "EXPLICIT";
+ padInfo = { top, bottom, left, right, type: padType };
+ outHeight = conditionalRound(
+ (inHeight - filterHeight + top + bottom) / strideHeight + 1,
+ roundingMode
+ );
+ outWidth = conditionalRound(
+ (inWidth - filterWidth + left + right) / strideWidth + 1,
+ roundingMode
+ );
+ } else {
+ throw Error(`Unknown padding parameter: ${pad}`);
+ }
+ return { padInfo, outHeight, outWidth };
+ }
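+ // Worked example (illustrative only) for the 'same' branch above:
+ // inHeight = 5, strideHeight = 2, filterHeight = 3 gives
+ //   outHeight = ceil(5 / 2) = 3,
+ //   padAlongHeight = max(0, (3 - 1) * 2 + 3 - 5) = 2,
+ //   top = 1, bottom = 1,
+ // so the input is padded by one row on each side.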
+ function get3DPadAndOutInfo(
+ pad,
+ inDepth,
+ inHeight,
+ inWidth,
+ strideDepth,
+ strideHeight,
+ strideWidth,
+ filterDepth,
+ filterHeight,
+ filterWidth,
+ roundingMode
+ ) {
+ let padInfo;
+ let outDepth;
+ let outHeight;
+ let outWidth;
+ if (typeof pad === "number") {
+ const padType = pad === 0 ? "VALID" : "NUMBER";
+ padInfo = {
+ top: pad,
+ bottom: pad,
+ left: pad,
+ right: pad,
+ front: pad,
+ back: pad,
+ type: padType,
+ };
+ const outShape = computeOutputShape4D(
+ [inDepth, inHeight, inWidth, 1],
+ filterDepth,
+ 1,
+ strideDepth,
+ pad,
+ roundingMode
+ );
+ outDepth = outShape[0];
+ outHeight = outShape[1];
+ outWidth = outShape[2];
+ } else if (pad === "same") {
+ outDepth = Math.ceil(inDepth / strideDepth);
+ outHeight = Math.ceil(inHeight / strideHeight);
+ outWidth = Math.ceil(inWidth / strideWidth);
+ const padAlongDepth = (outDepth - 1) * strideDepth + filterDepth - inDepth;
+ const padAlongHeight = (outHeight - 1) * strideHeight + filterHeight - inHeight;
+ const padAlongWidth = (outWidth - 1) * strideWidth + filterWidth - inWidth;
+ const front = Math.floor(padAlongDepth / 2);
+ const back = padAlongDepth - front;
+ const top = Math.floor(padAlongHeight / 2);
+ const bottom = padAlongHeight - top;
+ const left = Math.floor(padAlongWidth / 2);
+ const right = padAlongWidth - left;
+ padInfo = { top, bottom, left, right, front, back, type: "SAME" };
+ } else if (pad === "valid") {
+ padInfo = {
+ top: 0,
+ bottom: 0,
+ left: 0,
+ right: 0,
+ front: 0,
+ back: 0,
+ type: "VALID",
+ };
+ outDepth = Math.ceil((inDepth - filterDepth + 1) / strideDepth);
+ outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
+ outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
+ } else {
+ throw Error(`Unknown padding parameter: ${pad}`);
+ }
+ return { padInfo, outDepth, outHeight, outWidth };
+ }
+ /**
+ * Rounds a value depending on the rounding mode
+ * @param value
+ * @param roundingMode
+ */
+ function conditionalRound(value, roundingMode) {
+ if (!roundingMode) {
+ return value;
+ }
+ switch (roundingMode) {
+ case "round":
+ // used for Caffe Conv
+ return Math.round(value);
+ case "ceil":
+ // used for Caffe Pool
+ return Math.ceil(value);
+ case "floor":
+ return Math.floor(value);
+ default:
+ throw new Error(`Unknown roundingMode ${roundingMode}`);
+ }
+ }
+ function tupleValuesAreOne(param) {
+ const [dimA, dimB, dimC] = parseTupleParam(param);
+ return dimA === 1 && dimB === 1 && dimC === 1;
+ }
+ function eitherStridesOrDilationsAreOne(strides, dilations) {
+ return tupleValuesAreOne(strides) || tupleValuesAreOne(dilations);
+ }
+ /**
+ * Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
+ * 'channelsLast'|'channelsFirst'
+ * @param dataFormat in 'NHWC'|'NCHW' mode
+ * @return dataFormat in 'channelsLast'|'channelsFirst' mode
+ * @throws unknown dataFormat
+ */
+ function convertConv2DDataFormat(dataFormat) {
+ if (dataFormat === "NHWC") {
+ return "channelsLast";
+ } else if (dataFormat === "NCHW") {
+ return "channelsFirst";
+ } else {
+ throw new Error(`Unknown dataFormat ${dataFormat}`);
+ }
+ }
+ //# sourceMappingURL=conv_util.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d_backprop.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the backprop of a 3d avg pool.
+ *
+ * @param dy The dy error, of rank 5 or rank 4 of shape
+ * [batchSize, depth, height, width, channels]. If rank 4, a batch of 1 is
+ * assumed.
+ * @param input The original input image, of rank 5 or rank 4 of shape
+ * [batchSize, depth, height, width, channels].
+ * @param filterSize The filter size:
+ * `[filterDepth, filterHeight, filterWidth]`. If
+ * `filterSize` is a single number,
+ * then `filterDepth == filterHeight == filterWidth`.
+ * @param strides The strides of the pooling:
+ * `[strideDepth, strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideDepth == strideHeight == strideWidth`.
+ * @param dilations Deprecated, this field will be gone in v3.0.0. The dilation
+ * rates: `[dilationDepth, dilationHeight, dilationWidth]`
+ * in which we sample input values across the depth, height and width
+ * dimensions in dilated pooling.
+ * Defaults to `[1, 1, 1]`. If `dilations` is a single number,
+ * then `dilationDepth == dilationHeight == dilationWidth`.
+ * If it is greater than 1, then all values of `strides` must be 1.
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
+ * rounding mode used when computing output dimensions if pad is a
+ * number. If none is provided, it will not round and error if the output
+ * is of fractional size.
+ */
+ function avgPool3dBackprop_(
+ dy,
+ input,
+ filterSize,
+ strides,
+ dilations = [1, 1, 1],
+ pad,
+ dimRoundingMode
+ ) {
+ const $dy = Object(tensor_util_env["a" /* convertToTensor */])(
+ dy,
+ "dy",
+ "avgPool3dBackprop"
+ );
+ const $input = Object(tensor_util_env["a" /* convertToTensor */])(
+ input,
+ "input",
+ "avgPool3dBackprop"
+ );
+ let dy5D = $dy;
+ let input5D = $input;
+ let reshapedTo5D = false;
+ if ($input.rank === 4) {
+ reshapedTo5D = true;
+ dy5D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]);
+ input5D = reshape($input, [
+ 1,
+ $input.shape[0],
+ $input.shape[1],
+ $input.shape[2],
+ $input.shape[3],
+ ]);
+ }
+ util["assert"](
+ dy5D.rank === 5,
+ () => `Error in avgPool3dBackprop: dy must be rank 5 but got rank ` + `${dy5D.rank}.`
+ );
+ util["assert"](
+ input5D.rank === 5,
+ () =>
+ `Error in avgPool3dBackprop: input must be rank 5 but got rank ` + `${input5D.rank}.`
+ );
+ util["assert"](
+ eitherStridesOrDilationsAreOne(strides, dilations),
+ () =>
+ "Error in avgPool3dBackprop: Either strides or dilations " +
+ `must be 1. Got strides ${strides} and dilations '${dilations}'`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+ `Error in avgPool3dBackprop: pad must be an integer when ` +
+ `using dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const forward = (backend) => {
+ const convInfo = computePool3DInfo(
+ input5D.shape,
+ filterSize,
+ strides,
+ dilations,
+ pad,
+ dimRoundingMode
+ );
+ return backend.avgPool3dBackprop(dy5D, input5D, convInfo);
+ };
+ const inputs = { dy: dy5D, input: input5D };
+ const attrs = { filterSize, strides, dilations, pad, dimRoundingMode };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["f" /* AvgPool3DBackprop */],
+ attrs
+ );
+ if (reshapedTo5D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
+ }
+ return res;
+ }
+ const avgPool3dBackprop = Object(operation["a" /* op */])({ avgPool3dBackprop_ });
+ //# sourceMappingURL=avg_pool_3d_backprop.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool3D_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const avgPool3DGradConfig = {
+ kernelName: kernel_names["e" /* AvgPool3D */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved, attrs) => {
+ const [x] = saved;
+ const { filterSize, strides, dilations, pad, dimRoundingMode } = attrs;
+ const $dilations = dilations == null ? [1, 1, 1] : dilations;
+ return {
+ x: () =>
+ avgPool3dBackprop(dy, x, filterSize, strides, $dilations, pad, dimRoundingMode),
+ };
+ },
+ };
+ //# sourceMappingURL=AvgPool3D_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_backprop.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the backprop of a 2D avg pool.
+ *
+ * @param dy The dy error, of rank 4 or rank 3 of shape
+ * [batchSize, height, width, channels]. If rank 3, batch of 1 is
+ * assumed.
+ * @param input The input image, of rank 4 or rank 3 of shape
+ * [batchSize, height, width, channels]. If rank 3, batch of 1 is
+ * assumed.
+ * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ */
+ function avgPoolBackprop_(dy, input, filterSize, strides, pad) {
+ const $dy = Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "avgPoolBackprop");
+ const $input = Object(tensor_util_env["a" /* convertToTensor */])(
+ input,
+ "input",
+ "avgPoolBackprop"
+ );
+ util["assert"](
+ $input.rank === $dy.rank,
+ () => `Rank of input (${$input.rank}) does not match rank of dy (${$dy.rank})`
+ );
+ let input4D = $input;
+ let dy4D = $dy;
+ let reshapedTo4D = false;
+ if ($input.rank === 3) {
+ reshapedTo4D = true;
+ input4D = reshape($input, [1, $input.shape[0], $input.shape[1], $input.shape[2]]);
+ dy4D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2]]);
+ }
+ util["assert"](
+ dy4D.rank === 4,
+ () => `Error in avgPoolBackprop: dy must be rank 4 but got rank ` + `${dy4D.rank}.`
+ );
+ util["assert"](
+ input4D.rank === 4,
+ () => `Error in avgPoolBackprop: input must be rank 4 but got rank ` + `${input4D.rank}.`
+ );
+ const forward = (backend) => {
+ const convInfo = computePool2DInfo(
+ input4D.shape,
+ filterSize,
+ strides,
+ 1 /* dilations */,
+ pad
+ );
+ return backend.avgPoolBackprop(dy4D, input4D, convInfo);
+ };
+ const inputs = { dy: dy4D, input: input4D };
+ const attrs = { filterSize, strides, pad };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["g" /* AvgPoolBackprop */],
+ attrs
+ );
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ const avgPoolBackprop = Object(operation["a" /* op */])({ avgPoolBackprop_ });
+ //# sourceMappingURL=avg_pool_backprop.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const avgPoolGradConfig = {
+ kernelName: kernel_names["d" /* AvgPool */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved, attrs) => {
+ const [x] = saved;
+ const { filterSize, strides, pad } = attrs;
+ return {
+ x: () => avgPoolBackprop(dy, x, filterSize, strides, pad),
+ };
+ },
+ };
+ //# sourceMappingURL=AvgPool_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the dot product of two matrices, A * B. These must be matrices.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2], [1, 2]);
+ * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * a.matMul(b).print(); // or tf.matMul(a, b)
+ * ```
+ * @param a First matrix in dot product operation.
+ * @param b Second matrix in dot product operation.
+ * @param transposeA If true, `a` is transposed before multiplication.
+ * @param transposeB If true, `b` is transposed before multiplication.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Matrices'} */
+ function matMul_(a, b, transposeA = false, transposeB = false) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "matMul");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "matMul");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ util["assert"](
+ $a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank,
+ () =>
+ `Error in matMul: inputs must have the same rank of at least 2, ` +
+ `got ranks ${$a.rank} and ${$b.rank}.`
+ );
+ const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];
+ const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];
+ const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];
+ const outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];
+ const outerDimsA = $a.shape.slice(0, -2);
+ const outerDimsB = $b.shape.slice(0, -2);
+ const batchDimA = util["sizeFromShape"](outerDimsA);
+ const batchDimB = util["sizeFromShape"](outerDimsB);
+ util["assert"](
+ util["arraysEqual"](outerDimsA, outerDimsB),
+ () =>
+ `Error in matMul: outer dimensions (${outerDimsA}) and (` +
+ `${outerDimsB}) of Tensors with shapes ${$a.shape} and ` +
+ `${$b.shape} must match.`
+ );
+ util["assert"](
+ innerShapeA === innerShapeB,
+ () =>
+ `Error in matMul: inner shapes (${innerShapeA}) and (` +
+ `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +
+ `${$b.shape} and transposeA=${transposeA}` +
+ ` and transposeB=${transposeB} must match.`
+ );
+ const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]);
+ const a3D = transposeA
+ ? reshape($a, [batchDimA, innerShapeA, outerShapeA])
+ : reshape($a, [batchDimA, outerShapeA, innerShapeA]);
+ const b3D = transposeB
+ ? reshape($b, [batchDimB, outerShapeB, innerShapeB])
+ : reshape($b, [batchDimB, innerShapeB, outerShapeB]);
+ const forward = (backend, save) => {
+ save([a3D, b3D]);
+ return backend.batchMatMul(a3D, b3D, transposeA, transposeB);
+ };
+ const inputs = { a: a3D, b: b3D };
+ const attrs = { transposeA, transposeB };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["h" /* BatchMatMul */],
+ attrs
+ );
+ return reshape(res, outShape);
+ }
+ const matMul = Object(operation["a" /* op */])({ matMul_ });
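+ // Note (editorial): matMul_ above supports stacked (rank > 2) inputs by
+ // collapsing all leading dimensions into a single batch dimension,
+ // running backend.batchMatMul on the resulting rank-3 tensors, and
+ // reshaping the result back to outShape. For plain rank-2 inputs the
+ // batch dimension is simply 1.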
+ //# sourceMappingURL=mat_mul.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/BatchMatMul_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const batchMatMulGradConfig = {
+ kernelName: kernel_names["h" /* BatchMatMul */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved, attrs) => {
+ const [a, b] = saved;
+ const { transposeA, transposeB } = attrs;
+ if (!transposeA && !transposeB) {
+ return {
+ a: () => matMul(dy, b, false, true),
+ b: () => matMul(a, dy, true, false),
+ };
+ } else if (!transposeA && transposeB) {
+ return {
+ a: () => matMul(dy, b, false, false),
+ b: () => matMul(dy, a, true, false),
+ };
+ } else if (transposeA && !transposeB) {
+ return {
+ a: () => matMul(b, dy, false, true),
+ b: () => matMul(a, dy, false, false),
+ };
+ } else {
+ return {
+ a: () => matMul(b, dy, true, true),
+ b: () => matMul(dy, a, true, true),
+ };
+ }
+ },
+ };
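+ // Note (editorial): the cases above are the usual matrix-calculus rules
+ // for Y = A x B. With no transposes, dA = dY x B^T and dB = A^T x dY; the
+ // remaining branches follow by substituting A^T or B^T for A or B and
+ // simplifying, which is why each case only swaps operands and transpose
+ // flags.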
+ //# sourceMappingURL=BatchMatMul_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * This operation divides "spatial" dimensions `[1, ..., M]` of the input into
+ * a grid of blocks of shape `blockShape`, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial
+ * dimensions `[1, ..., M]` correspond to the position within the grid,
+ * and the batch dimension combines both the position within a spatial block
+ * and the original batch position. Prior to division into blocks,
+ * the spatial dimensions of the input are optionally zero padded
+ * according to `paddings`. See below for a precise description.
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
+ * const blockShape = [2, 2];
+ * const paddings = [[0, 0], [0, 0]];
+ *
+ * x.spaceToBatchND(blockShape, paddings).print();
+ * ```
+ *
+ * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
+ * remainingShape`, where spatialShape has `M` dimensions.
+ * @param blockShape A 1-D array. Must have shape `[M]`, all values must
+ * be >= 1.
+ * @param paddings A 2-D array. Must have shape `[M, 2]`, all values must be >=
+ * 0. `paddings[i] = [padStart, padEnd]` specifies the amount to zero-pad
+ * from input dimension `i + 1`, which corresponds to spatial dimension `i`. It
+ * is required that
+ * `(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0`
+ *
+ * This operation is equivalent to the following steps:
+ *
+ * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input
+ * according to `paddings` to produce `padded` of shape paddedShape.
+ *
+ * 2. Reshape `padded` to `reshapedPadded` of shape:
+ * `[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape`
+ *
+ * 3. Permute dimensions of `reshapedPadded` to produce `permutedReshapedPadded`
+ * of shape: `blockShape + [batch] + [paddedShape[1] / blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1]] + remainingShape`
+ *
+ * 4. Reshape `permutedReshapedPadded` to flatten `blockShape` into the
+ * batch dimension, producing an output tensor of shape:
+ * `[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1]] + remainingShape`
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function spaceToBatchND_(x, blockShape, paddings) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "spaceToBatchND");
+ util["assert"](
+ $x.rank >= 1 + blockShape.length,
+ () => `input rank ${$x.rank} should be > than [blockShape] ${blockShape.length}`
+ );
+ util["assert"](
+ paddings.length === blockShape.length,
+ () =>
+ `paddings.shape[0] ${paddings.length} must be equal to [blockShape] ${blockShape.length}`
+ );
+ util["assert"](
+ $x.shape.reduce((a, b, i) => {
+ if (i > 0 && i <= blockShape.length) {
+ return a && (b + paddings[i - 1][0] + paddings[i - 1][1]) % blockShape[i - 1] === 0;
+ }
+ return a;
+ }, true),
+ () =>
+ `input spatial dimensions ${$x.shape.slice(
+ 1
+ )} with paddings ${paddings.toString()} must be divisible by blockShapes ${blockShape.toString()}`
+ );
+ const forward = (backend) => backend.spaceToBatchND($x, blockShape, paddings);
+ const inputs = { x: $x };
+ const attrs = { blockShape, paddings };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["mb" /* SpaceToBatchND */],
+ attrs
+ );
+ }
+ const spaceToBatchND = Object(operation["a" /* op */])({ spaceToBatchND_ });
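+ // Worked shape example (illustrative only): for the doc snippet above,
+ // x has shape [1, 2, 2, 1], blockShape = [2, 2] and zero paddings, so the
+ // 2x2 spatial grid is folded into the batch dimension and the result has
+ // shape [1 * 2 * 2, 2 / 2, 2 / 2, 1] === [4, 1, 1, 1].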
+ //# sourceMappingURL=space_to_batch_nd.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/BatchToSpaceND_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const batchToSpaceNDGradConfig = {
+ kernelName: kernel_names["i" /* BatchToSpaceND */],
+ gradFunc: (dy, saved, attrs) => {
+ const { blockShape, crops } = attrs;
+ return { x: () => spaceToBatchND(dy, blockShape, crops) };
+ },
+ };
+ //# sourceMappingURL=BatchToSpaceND_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/BroadcastTo_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const broadcastToGradConfig = {
+ kernelName: kernel_names["j" /* BroadcastTo */],
+ gradFunc: (dy, saved, attrs) => {
+ const broadCastToAttrs = attrs;
+ const inputShape = broadCastToAttrs.inputShape;
+ const outputShape = broadCastToAttrs.shape;
+ const reps = Array.from(outputShape);
+ for (let i = inputShape.length - 1; i >= 0; i--) {
+ if (inputShape[i] === outputShape[i]) {
+ reps[i] = 1;
+ } else if (inputShape[i] !== 1) {
+ throw new Error(
+ `broadcastTo(): [${inputShape}] cannot be broadcast to [${outputShape}].`
+ );
+ }
+ }
+ const axes = [];
+ for (let i = 0; i < reps.length; i++) {
+ if (reps[i] > 1) {
+ axes.push(i);
+ }
+ }
+ return { x: () => sum(dy, axes, true /* keepDims */) };
+ },
+ };
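+ // Worked example (illustrative only): broadcasting inputShape [1, 3] to
+ // outputShape [2, 3] gives reps = [2, 1], so axes = [0] and the gradient
+ // is sum(dy, [0], true), i.e. the upstream gradient summed over the
+ // replicated axis with the length-1 dimension preserved via keepDims.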
+ //# sourceMappingURL=BroadcastTo_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/split.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Splits a `tf.Tensor` into sub tensors.
+ *
+ * If `numOrSizeSplits` is a number, splits `x` along dimension `axis`
+ * into `numOrSizeSplits` smaller tensors.
+ * Requires that `numOrSizeSplits` evenly divides `x.shape[axis]`.
+ *
+ * If `numOrSizeSplits` is a number array, splits `x` into
+ * `numOrSizeSplits.length` pieces. The shape of the `i`-th piece has the
+ * same size as `x` except along dimension `axis` where the size is
+ * `numOrSizeSplits[i]`.
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [2, 4]);
+ * const [a, b] = tf.split(x, 2, 1);
+ * a.print();
+ * b.print();
+ *
+ * const [c, d, e] = tf.split(x, [1, 2, 1], 1);
+ * c.print();
+ * d.print();
+ * e.print();
+ * ```
+ *
+ * @param x The input tensor to split.
+ * @param numOrSizeSplits Either an integer indicating the number of
+ * splits along the axis or an array of integers containing the sizes of
+ * each output tensor along the axis. If a number then it must evenly divide
+ * `x.shape[axis]`; otherwise the sum of sizes must match `x.shape[axis]`.
+ * @param axis The dimension along which to split. Defaults to 0 (the first
+ * dim).
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
+ function split_(x, numOrSizeSplits, axis = 0) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "split");
+ const $axis = Object(util["parseAxisParam"])(axis, $x.shape)[0];
+ let splitSizes;
+ if (typeof numOrSizeSplits === "number") {
+ Object(util["assert"])(
+ $x.shape[$axis] % numOrSizeSplits === 0,
+ () => "Number of splits must evenly divide the axis."
+ );
+ splitSizes = new Array(numOrSizeSplits).fill($x.shape[$axis] / numOrSizeSplits);
+ } else {
+ Object(util["assert"])(
+ $x.shape[$axis] === numOrSizeSplits.reduce((a, b) => a + b),
+ () => "The sum of sizes must match the size of the axis dimension."
+ );
+ splitSizes = numOrSizeSplits;
+ }
+ const forward = (backend, _) => {
+ return backend.split($x, splitSizes, $axis);
+ };
+ const inputs = { x: $x };
+ const attr = { numOrSizeSplits, axis };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["nb" /* SplitV */],
+ attr
+ );
+ }
+ const split = Object(operation["a" /* op */])({ split_ });
+ //# sourceMappingURL=split.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Concat_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const concatGradConfig = {
+ kernelName: kernel_names["l" /* Concat */],
+ saveAllInputs: true,
+ gradFunc: (dy, saved, attrs) => {
+ const shapes = saved.map((t) => t.shape);
+ const { axis } = attrs;
+ const $axis = Object(util["parseAxisParam"])(axis, saved[0].shape)[0];
+ const sizeSplits = shapes.map((s) => s[$axis]);
+ const derTensors = split(dy, sizeSplits, $axis);
+ return derTensors.map((t) => () => t);
+ },
+ };
+ //# sourceMappingURL=Concat_grad.js.map
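+          // The concat gradient above recovers one gradient per concatenated input by
+          // splitting dy along the concat axis using each saved input's size on that
+          // axis. A minimal sketch (illustrative only, never called) with placeholder
+          // arguments mirroring what gradFunc receives:
+          function sketchConcatGrad(dy, savedInputs, axis) {
+            const sizes = savedInputs.map((t) => t.shape[axis]);
+            return split(dy, sizes, axis); // one slice of dy per concatenated input
+          }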
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the derivative of the filter of a 2D convolution.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * [batch, height, width, inChannels]. If rank 3, batch of 1 is assumed.
+ * @param dy The dy image, of rank 4 or rank 3, of shape
+ * [batch, height, width, outDepth]. If rank 3, batch of 1 is assumed.
+ * @param filterShape The shape of the filter, length 4,
+ * [filterHeight, filterWidth, inDepth, outDepth].
+ * @param strides The strides of the convolution: [strideHeight,
+ * strideWidth].
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
+ * rounding mode used when computing output dimensions if pad is a
+ * number. If none is provided, it will not round and error if the output
+ * is of fractional size.
+ */
+ function conv2DBackpropFilter_(
+ x,
+ dy,
+ filterShape,
+ strides,
+ pad,
+ dataFormat = "NHWC",
+ dimRoundingMode
+ ) {
+ let x4D = x;
+ if (x.rank === 3) {
+ x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
+ }
+ let dy4D = dy;
+ if (dy4D.rank === 3) {
+ dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
+ }
+ util["assert"](
+ x4D.rank === 4,
+ () => `Error in conv2dDerFilter: input must be rank 4, but got shape ` + `${x4D.shape}.`
+ );
+ util["assert"](
+ dy4D.rank === 4,
+ () => `Error in conv2dDerFilter: dy must be rank 4, but got shape ` + `${dy4D.shape}.`
+ );
+ util["assert"](
+ filterShape.length === 4,
+ () =>
+ `Error in conv2dDerFilter: filterShape must be length 4, but got ` + `${filterShape}.`
+ );
+ const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1];
+ const outDepth = dataFormat === "NHWC" ? dy4D.shape[3] : dy4D.shape[1];
+ util["assert"](
+ inDepth === filterShape[2],
+ () =>
+                `Error in conv2dDerFilter: depth of input (${inDepth}) must ` +
+                `match input depth in filter (${filterShape[2]}).`
+ );
+ util["assert"](
+ outDepth === filterShape[3],
+ () =>
+ `Error in conv2dDerFilter: depth of dy (${outDepth}) must ` +
+ `match output depth for filter (${filterShape[3]}).`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+                  `Error in conv2dDerFilter: pad must be an integer when using ` +
+                  `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const forward = (backend) => {
+ const dilations = 1;
+ const $dataFormat = convertConv2DDataFormat(dataFormat);
+ const convInfo = computeConv2DInfo(
+ x4D.shape,
+ filterShape,
+ strides,
+ dilations,
+ pad,
+ dimRoundingMode,
+ false,
+ $dataFormat
+ );
+ return backend.conv2dDerFilter(x4D, dy4D, convInfo);
+ };
+ const inputs = { x: x4D, dy: dy4D };
+ const attrs = { strides, pad, dataFormat, dimRoundingMode };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["n" /* Conv2DBackpropFilter */],
+ attrs
+ );
+ }
+ const conv2DBackpropFilter = Object(operation["a" /* op */])({ conv2DBackpropFilter_ });
+ //# sourceMappingURL=conv2d_backprop_filter.js.map
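+          // A minimal usage sketch (illustrative only, never called) for the
+          // conv2DBackpropFilter op above. It assumes `x` is an NHWC activation of
+          // shape [batch, height, width, inDepth] and `dy` the matching output
+          // gradient [batch, outHeight, outWidth, outDepth]; the stride and padding
+          // shown are placeholders and must match the forward convolution.
+          function sketchConv2dFilterGrad(x, dy, filterShape) {
+            return conv2DBackpropFilter(x, dy, filterShape, [1, 1] /* strides */, "same");
+          }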
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the derivative of the input of a 2D convolution.
+ *
+ * @param xShape The shape of the input: [batch, height, width, inDepth].
+ * If length of 3, batch of 1 is assumed.
+ * @param dy The derivative of the output, of rank 4 or rank 3 of shape
+ * `[batch, outHeight, outWidth, outDepth]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter, rank 4, of shape
+ * `[filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm used:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dimRoundingMode The rounding mode used when computing output
+ * dimensions if pad is a number. If none is provided, it will not round
+ * and error if the output is of fractional size.
+ */
+ function conv2DBackpropInput_(
+ xShape,
+ dy,
+ filter,
+ strides,
+ pad,
+ dataFormat = "NHWC",
+ dimRoundingMode
+ ) {
+ util["assert"](
+ xShape.length === dy.rank,
+ () => `Length of inShape ` + `(${xShape.length}) and rank of dy (${dy.rank}) must match`
+ );
+ let xShape4D = xShape;
+ let dy4D = dy;
+ let reshapedTo4D = false;
+ if (dy.rank === 3) {
+ reshapedTo4D = true;
+ dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
+ xShape4D = [1, xShape[0], xShape[1], xShape[2]];
+ }
+ util["assert"](
+ xShape4D.length === 4,
+ () =>
+ `Error in conv2dDerInput: inShape must be length 4, but got length ` +
+ `${xShape4D.length}.`
+ );
+ util["assert"](
+ dy4D.rank === 4,
+ () => `Error in conv2dDerInput: dy must be rank 4, but got ` + `rank ${dy4D.rank}`
+ );
+ util["assert"](
+ filter.rank === 4,
+ () => `Error in conv2dDerInput: filter must be rank 4, but got ` + `rank ${filter.rank}`
+ );
+ const inDepth = dataFormat === "NHWC" ? xShape4D[3] : xShape4D[1];
+ const outDepth = dataFormat === "NHWC" ? dy4D.shape[3] : dy4D.shape[1];
+ util["assert"](
+ inDepth === filter.shape[2],
+ () =>
+ `Error in conv2dDerInput: depth of input (${inDepth}) must ` +
+ `match input depth for filter ${filter.shape[2]}.`
+ );
+ util["assert"](
+ outDepth === filter.shape[3],
+ () =>
+ `Error in conv2dDerInput: depth of output (${outDepth}) must ` +
+ `match output depth for filter ${filter.shape[3]}.`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+                  `Error in conv2dDerInput: pad must be an integer when using ` +
+                  `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const forward = (backend, save) => {
+ const dilations = 1;
+ const $dataFormat = convertConv2DDataFormat(dataFormat);
+ const convInfo = computeConv2DInfo(
+ xShape4D,
+ filter.shape,
+ strides,
+ dilations,
+ pad,
+ dimRoundingMode,
+ false,
+ $dataFormat
+ );
+ const res = backend.conv2dDerInput(dy4D, filter, convInfo);
+ save([dy4D, filter]);
+ return res;
+ };
+ const inputs = { dy: dy4D, filter };
+ const attrs = { strides, pad, dataFormat, dimRoundingMode };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["o" /* Conv2DBackpropInput */],
+ attrs
+ );
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ const conv2DBackpropInput = Object(operation["a" /* op */])({ conv2DBackpropInput_ });
+ //# sourceMappingURL=conv2d_backprop_input.js.map
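+          // A minimal usage sketch (illustrative only, never called) for
+          // conv2DBackpropInput: the forward input shape, the output gradient and the
+          // forward filter yield the gradient with respect to the convolution input.
+          // The stride and padding are placeholders matching the forward call.
+          function sketchConv2dInputGrad(xShape, dy, filter) {
+            return conv2DBackpropInput(xShape, dy, filter, [1, 1] /* strides */, "same");
+          }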
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2D_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const conv2DGradConfig = {
+ kernelName: kernel_names["m" /* Conv2D */],
+ inputsToSave: ["x", "filter"],
+ gradFunc: (dy, saved, attrs) => {
+ const [x4D, $filter] = saved;
+ const { dilations, strides, pad, dataFormat } = attrs;
+ util["assert"](
+ tupleValuesAreOne(dilations),
+ () =>
+ "Error in gradient of conv2D: dilation rates greater than 1 " +
+ `are not yet supported in gradients. Got dilations '${dilations}'`
+ );
+ return {
+ x: () => conv2DBackpropInput(x4D.shape, dy, $filter, strides, pad, dataFormat),
+ filter: () => conv2DBackpropFilter(x4D, dy, $filter.shape, strides, pad, dataFormat),
+ };
+ },
+ };
+ //# sourceMappingURL=Conv2D_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes a 2D convolution over the input x.
+ *
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter, rank 4, of shape
+ * `[filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_guides/python/nn#Convolution](
+ * https://www.tensorflow.org/api_guides/python/nn#Convolution)
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dimRoundingMode The rounding mode used when computing output
+ * dimensions if pad is a number. If none is provided, it will not round
+ * and error if the output is of fractional size.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Convolution'} */
+ function conv2d_(
+ x,
+ filter,
+ strides,
+ pad,
+ dataFormat = "NHWC",
+ dilations = [1, 1],
+ dimRoundingMode
+ ) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "conv2d");
+ const $filter = Object(tensor_util_env["a" /* convertToTensor */])(
+ filter,
+ "filter",
+ "conv2d"
+ );
+ let x4D = $x;
+ let reshapedTo4D = false;
+ if ($x.rank === 3) {
+ reshapedTo4D = true;
+ x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
+ }
+ util["assert"](
+ x4D.rank === 4,
+ () => `Error in conv2d: input must be rank 4, but got rank ${x4D.rank}.`
+ );
+ util["assert"](
+ $filter.rank === 4,
+ () => `Error in conv2d: filter must be rank 4, but got rank ` + `${$filter.rank}.`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+                  `Error in conv2d: pad must be an integer when using ` +
+                  `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1];
+ util["assert"](
+ inDepth === $filter.shape[2],
+ () =>
+ `Error in conv2d: depth of input (${inDepth}) must match ` +
+ `input depth for filter ${$filter.shape[2]}.`
+ );
+ util["assert"](
+ eitherStridesOrDilationsAreOne(strides, dilations),
+ () =>
+ "Error in conv2D: Either strides or dilations must be 1. " +
+ `Got strides ${strides} and dilations '${dilations}'`
+ );
+ const forward = (backend, save) => {
+ const $dataFormat = convertConv2DDataFormat(dataFormat);
+ const convInfo = computeConv2DInfo(
+ x4D.shape,
+ $filter.shape,
+ strides,
+ dilations,
+ pad,
+ dimRoundingMode,
+ false,
+ $dataFormat
+ );
+ const res = backend.conv2d(x4D, $filter, convInfo);
+ save([x4D, $filter]);
+ return res;
+ };
+ const inputs = { x: x4D, filter: $filter };
+ const attrs = { strides, pad, dataFormat, dilations, dimRoundingMode };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["m" /* Conv2D */],
+ attrs
+ );
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ const conv2d = Object(operation["a" /* op */])({ conv2d_ });
+ //# sourceMappingURL=conv2d.js.map
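+          // A minimal usage sketch (illustrative only, never called) of the conv2d op
+          // documented above, assuming `x` is an NHWC tensor
+          // [batch, height, width, inChannels] and `filter` is
+          // [filterHeight, filterWidth, inChannels, outChannels]:
+          function sketchConv2dForward(x, filter) {
+            // stride 1 in both spatial dims; 'same' padding preserves height/width.
+            return conv2d(x, filter, [1, 1], "same");
+          }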
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2DBackpropInput_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const conv2DBackpropInputGradConfig = {
+ kernelName: kernel_names["o" /* Conv2DBackpropInput */],
+ inputsToSave: ["dy", "filter"],
+ gradFunc: (ddx, saved, attrs) => {
+ const [dy, filter] = saved;
+ const { strides, pad, dataFormat, dimRoundingMode } = attrs;
+ return {
+ dy: () =>
+ conv2d(ddx, filter, strides, pad, dataFormat, 1 /* dilations */, dimRoundingMode),
+ filter: () =>
+ conv2DBackpropFilter(
+ ddx,
+ dy,
+ filter.shape,
+ strides,
+ pad,
+ dataFormat,
+ dimRoundingMode
+ ),
+ };
+ },
+ };
+ //# sourceMappingURL=Conv2DBackpropInput_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_filter.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the derivative of the filter of a 3D convolution.
+ *
+ * @param x The input tensor, of rank 5 or rank 4 of shape
+ * [batch, depth, height, width, inChannels]. If rank 4, batch of 1 is
+ * assumed.
+ * @param dy The dy image, of rank 5 or rank 4, of shape
+ * [batch, depth, height, width, outDepth]. If rank 4, batch of 1 is
+ * assumed.
+ * @param filterShape The shape of the filter, length 5,
+ * [filterDepth, filterHeight, filterWidth, inDepth, outDepth].
+ * @param strides The strides of the convolution: [strideDepth, strideHeight,
+ * strideWidth].
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ */
+ function conv3DBackpropFilter_(x, dy, filterShape, strides, pad) {
+ let x5D = x;
+ if (x.rank === 4) {
+ x5D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2], x.shape[3]]);
+ }
+ let dy5D = dy;
+ if (dy5D.rank === 4) {
+ dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);
+ }
+ util["assert"](
+ x5D.rank === 5,
+ () => `Error in conv3dDerFilter: input must be rank 5, but got shape ` + `${x5D.shape}.`
+ );
+ util["assert"](
+ dy5D.rank === 5,
+ () => `Error in conv3dDerFilter: dy must be rank 5, but got shape ` + `${dy5D.shape}.`
+ );
+ util["assert"](
+ filterShape.length === 5,
+ () =>
+ `Error in conv3dDerFilter: filterShape must be length 5, but got ` + `${filterShape}.`
+ );
+ util["assert"](
+ x5D.shape[4] === filterShape[3],
+ () =>
+                `Error in conv3dDerFilter: depth of input (${x5D.shape[4]}) must ` +
+                `match input depth in filter (${filterShape[3]}).`
+ );
+ util["assert"](
+ dy5D.shape[4] === filterShape[4],
+ () =>
+ `Error in conv3dDerFilter: depth of dy (${dy5D.shape[4]}) must ` +
+ `match output depth for filter (${filterShape[4]}).`
+ );
+ const forward = (backend) => {
+ const dilations = 1;
+ const convInfo = computeConv3DInfo(x5D.shape, filterShape, strides, dilations, pad);
+ return backend.conv3dDerFilter(x5D, dy5D, convInfo);
+ };
+ const inputs = { x: x5D, y: dy5D };
+ const attrs = { strides, pad };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["q" /* Conv3DBackpropFilterV2 */],
+ attrs
+ );
+ }
+ const conv3DBackpropFilter = Object(operation["a" /* op */])({ conv3DBackpropFilter_ });
+ //# sourceMappingURL=conv3d_backprop_filter.js.map
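+          // A minimal usage sketch (illustrative only, never called) for
+          // conv3DBackpropFilter, assuming `x` is [batch, depth, height, width, inDepth]
+          // and `dy` the matching output gradient; the strides and padding shown are
+          // placeholders that must mirror the forward conv3d call.
+          function sketchConv3dFilterGrad(x, dy, filterShape) {
+            return conv3DBackpropFilter(x, dy, filterShape, [1, 1, 1] /* strides */, "same");
+          }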
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the derivative of the input of a 3D convolution.
+ *
+ * @param xShape The shape of the input: [batch, depth, height, width,
+ * in_channels]. If length of 4, batch of 1 is assumed.
+ * @param dy The derivative of the output, of rank 5 or rank 4 of shape
+ * `[batch, outDepth, outHeight, outWidth, in_channels]`.
+ * If rank 4, batch of 1 is assumed.
+ * @param filter The filter, rank 5, of shape
+ * `[filterDepth, filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideDepth, strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm used:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ */
+ function conv3DBackpropInput_(xShape, dy, filter, strides, pad) {
+ util["assert"](
+ xShape.length === dy.rank,
+ () => `Length of inShape ` + `(${xShape.length}) and rank of dy (${dy.rank}) must match`
+ );
+ let xShape5D = xShape;
+ let dy5D = dy;
+ let reshapedTo5D = false;
+ if (dy.rank === 4) {
+ reshapedTo5D = true;
+ dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);
+ xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]];
+ }
+ const inDepth = xShape5D[4];
+ const outDepth = dy5D.shape[4];
+ util["assert"](
+ xShape5D.length === 5,
+ () =>
+ `Error in conv3dDerInput: inShape must be length 5, but got length ` +
+ `${xShape5D.length}.`
+ );
+ util["assert"](
+ dy5D.rank === 5,
+ () => `Error in conv3dDerInput: dy must be rank 5, but got ` + `rank ${dy5D.rank}`
+ );
+ util["assert"](
+ filter.rank === 5,
+ () => `Error in conv3dDerInput: filter must be rank 5, but got ` + `rank ${filter.rank}`
+ );
+ util["assert"](
+ inDepth === filter.shape[3],
+ () =>
+ `Error in conv3dDerInput: depth of input (${inDepth}) must ` +
+ `match input depth for filter ${filter.shape[3]}.`
+ );
+ util["assert"](
+ outDepth === filter.shape[4],
+ () =>
+ `Error in conv3dDerInput: depth of output (${outDepth}) must ` +
+ `match output depth for filter ${filter.shape[4]}.`
+ );
+ const forward = (backend) => {
+ const dilations = 1;
+ const convInfo = computeConv3DInfo(xShape5D, filter.shape, strides, dilations, pad);
+ return backend.conv3dDerInput(dy5D, filter, convInfo);
+ };
+ const inputs = { dy: dy5D };
+ const attrs = { pad };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["r" /* Conv3DBackpropInputV2 */],
+ attrs
+ );
+ if (reshapedTo5D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
+ }
+ return res;
+ }
+ const conv3DBackpropInput = Object(operation["a" /* op */])({ conv3DBackpropInput_ });
+ //# sourceMappingURL=conv3d_backprop_input.js.map
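+          // A minimal usage sketch (illustrative only, never called) for
+          // conv3DBackpropInput: the forward input shape, the output gradient and the
+          // forward filter give the gradient with respect to the conv3d input.
+          function sketchConv3dInputGrad(xShape, dy, filter) {
+            return conv3DBackpropInput(xShape, dy, filter, [1, 1, 1] /* strides */, "same");
+          }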
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Conv3D_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const conv3DGradConfig = {
+ kernelName: kernel_names["p" /* Conv3D */],
+ inputsToSave: ["x", "filter"],
+ gradFunc: (dy, saved, attrs) => {
+ const { dilations, strides, pad } = attrs;
+ util["assert"](
+ tupleValuesAreOne(dilations),
+ () =>
+ "Error in gradient of conv3D: dilation rates greater than 1 are " +
+ `not yet supported in gradients. Got dilations '${dilations}'`
+ );
+ const [x5D, $filter] = saved;
+ return {
+ x: () => conv3DBackpropInput(x5D.shape, dy, $filter, strides, pad),
+ filter: () => conv3DBackpropFilter(x5D, dy, $filter.shape, strides, pad),
+ };
+ },
+ };
+ //# sourceMappingURL=Conv3D_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.
+ *
+ * The returned `tf.Tensor`'s dimension `i` will correspond to the input
+ * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,
+ * where `n` is the rank of the input `tf.Tensor`. Hence by default, this
+ * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
+ *
+ * a.transpose().print(); // or tf.transpose(a)
+ * ```
+ *
+ * @param x The tensor to transpose.
+           * @param perm The permutation of the dimensions of the input.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Matrices'} */
+ function transpose_(x, perm) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "transpose");
+ if (perm == null) {
+ perm = $x.shape.map((s, i) => i).reverse();
+ }
+ util["assert"](
+ $x.rank === perm.length,
+ () =>
+ `Error in transpose: rank of input ${$x.rank} ` + `must match length of perm ${perm}.`
+ );
+ perm.forEach((axis) => {
+ util["assert"](
+ axis >= 0 && axis < $x.rank,
+ () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` + ` but got ${perm}`
+ );
+ });
+ if ($x.rank <= 1) {
+ return $x.clone();
+ }
+ const attrs = { perm };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.transpose($x, perm),
+ { x: $x },
+ null /* gradient */,
+ "Transpose",
+ attrs
+ );
+ }
+ const transpose = Object(operation["a" /* op */])({ transpose_ });
+ //# sourceMappingURL=transpose.js.map
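+          // A minimal sketch (illustrative only, never called) of transpose with an
+          // explicit `perm`, e.g. turning an NHWC activation into NCHW layout by
+          // permuting the axes to [0, 3, 1, 2]:
+          function sketchNhwcToNchw(x) {
+            return transpose(x, [0, 3, 1, 2]); // [b, h, w, c] -> [b, c, h, w]
+          }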
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the cumulative sum of a `tf.Tensor` along `axis`.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2, 3, 4]);
+ * x.cumsum().print();
+ * ```
+ * ```js
+ * const x = tf.tensor([[1, 2], [3, 4]]);
+ * x.cumsum().print();
+ * ```
+ *
+ * @param x The input tensor to be summed.
+ * @param axis The axis along which to sum. Optional. Defaults to 0.
+ * @param exclusive Whether to perform exclusive cumulative sum. Optional.
+ * Defaults to false. If set to true then the sum of each tensor entry
+ * does not include its own value, but only the values previous to it
+ * along the specified axis.
+ * @param reverse Whether to sum in the opposite direction. Optional.
+ * Defaults to false.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Scan'} */
+ function cumsum_(x, axis = 0, exclusive = false, reverse = false) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cumsum");
+ const forward = (backend, save) => {
+ const permutation = getAxesPermutation([axis], $x.rank);
+ let permutedX = $x;
+ if (permutation != null) {
+ permutedX = transpose($x, permutation);
+ }
+ const permutedAxis = getInnerMostAxes(1, $x.rank)[0];
+ let value = backend.cumsum(permutedX, permutedAxis, exclusive, reverse);
+ save([$x]);
+ if (permutation != null) {
+ value = transpose(value, permutation);
+ }
+ return value;
+ };
+ const inputs = { x: $x };
+ const attrs = { axis, exclusive, reverse };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["s" /* Cumsum */],
+ attrs
+ );
+ }
+ const cumsum = Object(operation["a" /* op */])({ cumsum_ });
+ //# sourceMappingURL=cumsum.js.map
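+          // A minimal sketch (illustrative only, never called) of the cumsum flags
+          // documented above: `exclusive` drops each element's own value and `reverse`
+          // accumulates from the end, so a 1D input [1, 2, 3, 4] yields [9, 7, 4, 0].
+          function sketchExclusiveReverseCumsum(x) {
+            return cumsum(x, 0, true /* exclusive */, true /* reverse */);
+          }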
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Cumsum_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const cumsumGradConfig = {
+ kernelName: kernel_names["s" /* Cumsum */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved, attrs) => {
+ const [x] = saved;
+ const { axis, exclusive, reverse } = attrs;
+ return {
+ x: () => {
+ const permutation = getAxesPermutation([axis], x.rank);
+ let out = cumsum(dy, axis, exclusive, !reverse);
+ if (permutation != null) {
+ out = transpose(out, permutation);
+ }
+ return out;
+ },
+ };
+ },
+ };
+ //# sourceMappingURL=Cumsum_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, convInfo) {
+ let x4D = x;
+ if (x.rank === 3) {
+ x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
+ }
+ let dy4D = dy;
+ if (dy4D.rank === 3) {
+ dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
+ }
+ const forward = (backend) => backend.depthwiseConv2DDerFilter(x4D, dy4D, convInfo);
+ const inputs = { x: x4D, dy: dy4D };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["v" /* DepthwiseConv2dNativeBackpropFilter */]
+ );
+ }
+ const depthwiseConv2dNativeBackpropFilter = Object(operation["a" /* op */])({
+ depthwiseConv2dNativeBackpropFilter_,
+ });
+ //# sourceMappingURL=depthwise_conv2d_native_backprop_filter.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, convInfo) {
+ let dy4D = dy;
+ let reshapedTo4D = false;
+ if (dy.rank === 3) {
+ reshapedTo4D = true;
+ dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
+ }
+ const forward = (backend) => backend.depthwiseConv2DDerInput(dy4D, filter, convInfo);
+ const inputs = { dy: dy4D };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["w" /* DepthwiseConv2dNativeBackpropInput */]
+ );
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ const depthwiseConv2dNativeBackpropInput = Object(operation["a" /* op */])({
+ depthwiseConv2dNativeBackpropInput_,
+ });
+ //# sourceMappingURL=depthwise_conv2d_native_backprop_input.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/DepthwiseConv2dNative_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const depthwiseConv2dNativeGradConfig = {
+ kernelName: kernel_names["u" /* DepthwiseConv2dNative */],
+ inputsToSave: ["x", "filter"],
+ gradFunc: (dy, saved, attrs) => {
+ const { dilations, strides, pad, dimRoundingMode } = attrs;
+ const $dilations = dilations == null ? [1, 1] : dilations;
+ util["assert"](
+ tupleValuesAreOne($dilations),
+ () =>
+ "Error in gradient of depthwiseConv2dNative: dilation rates " +
+ `greater than 1 are not yet supported. Got dilations ` +
+ `'${$dilations}'`
+ );
+ const [x, filter] = saved;
+ util["assert"](
+ x.rank === 4,
+ () =>
+ `Error in gradient of depthwiseConv2dNative: input must be ` +
+ `rank 4, but got rank ${x.rank}.`
+ );
+ util["assert"](
+ filter.rank === 4,
+ () =>
+ `Error in gradient of depthwiseConv2dNative: filter must be ` +
+ `rank 4, but got rank ${filter.rank}.`
+ );
+ util["assert"](
+ x.shape[3] === filter.shape[2],
+ () =>
+ `Error in gradient of depthwiseConv2d: number of input ` +
+ `channels (${x.shape[3]}) must match the inChannels dimension ` +
+ `in filter ${filter.shape[2]}.`
+ );
+ util["assert"](
+ eitherStridesOrDilationsAreOne(strides, $dilations),
+ () =>
+ "Error in gradient of depthwiseConv2d: Either strides or " +
+ `dilations must be 1. Got strides ${strides} and dilations ` +
+ `'${$dilations}'.`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+                    `Error in depthwiseConv2d: pad must be an integer when using ` +
+                    `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const convInfo = computeConv2DInfo(
+ x.shape,
+ filter.shape,
+ strides,
+ $dilations,
+ pad,
+ dimRoundingMode,
+ true /* depthwise */
+ );
+ return {
+ x: () => depthwiseConv2dNativeBackpropInput(x.shape, dy, filter, convInfo),
+ filter: () => depthwiseConv2dNativeBackpropFilter(x, dy, filter.shape, convInfo),
+ };
+ },
+ };
+ //# sourceMappingURL=DepthwiseConv2dNative_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Div_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const divGradConfig = {
+ kernelName: kernel_names["y" /* Div */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ const res = div(dy, b.toFloat());
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ return sum(res, reduceAxes).reshape(a.shape);
+ }
+ return res;
+ };
+ const derB = () => {
+ let res = mul(dy, a.toFloat());
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = reshape(sum(res, reduceAxes), b.shape);
+ }
+ const tmp = square(b);
+ return neg(div(res, tmp.toFloat()));
+ };
+ return { a: derA, b: derB };
+ },
+ };
+ //# sourceMappingURL=Div_grad.js.map
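+          // The div gradient above is the broadcast-aware quotient rule: for scalars
+          // it reduces to d(a/b)/da = 1/b and d(a/b)/db = -a/b^2 (the extra
+          // sum-and-reshape steps handle broadcast axes). A minimal numeric sketch
+          // (illustrative only, never called), with plain numbers standing in for
+          // tensors:
+          function sketchScalarDivGrad(dy, a, b) {
+            return { da: dy / b, db: (-dy * a) / (b * b) };
+          }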
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Elu_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const eluGradConfig = {
+ kernelName: kernel_names["z" /* Elu */],
+ outputsToSave: [true],
+ gradFunc: (dy, saved) => {
+ const [y] = saved;
+ const backPropKernelFunc = (backend) => {
+ return backend.eluDer(dy, y);
+ };
+ const inputs = { dy, y };
+ return {
+ x: () =>
+ engine["a" /* ENGINE */].runKernelFunc(
+ backPropKernelFunc,
+ inputs,
+ null /* grad */,
+ kernel_names["A" /* EluGrad */]
+ ),
+ };
+ },
+ };
+ //# sourceMappingURL=Elu_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/FloorDiv_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const floorDivGradConfig = {
+ kernelName: kernel_names["D" /* FloorDiv */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ const res = dy.div(b.toFloat());
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ return res.sum(reduceAxes).reshape(a.shape);
+ }
+ return res;
+ };
+ const derB = () => {
+ let res = dy.mul(a.toFloat());
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = res.sum(reduceAxes).reshape(b.shape);
+ }
+ const tmp = b.square();
+ return res.div(tmp.toFloat()).neg();
+ };
+ return { a: derA, b: derB };
+ },
+ };
+ //# sourceMappingURL=FloorDiv_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sub.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.sub(b).print(); // or tf.sub(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast subtract a with b.
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.scalar(5);
+ *
+ * a.sub(b).print(); // or tf.sub(a, b)
+ * ```
+ * @param a The first `tf.Tensor` to subtract from.
+ * @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as
+ * `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
+ function sub_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "sub");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "sub");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ const forward = (backend, save) => {
+ const res = backend.subtract($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["qb" /* Sub */]
+ );
+ }
+ const sub = Object(operation["a" /* op */])({ sub_ });
+ //# sourceMappingURL=sub.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tile.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Construct a tensor by repeating it the number of times given by reps.
+ *
+ * This operation creates a new tensor by replicating `input` `reps`
+ * times. The output tensor's i'th dimension has `input.shape[i] *
+ * reps[i]` elements, and the values of `input` are replicated
+ * `reps[i]` times along the i'th dimension. For example, tiling
+ * `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ *
+           * a.tile([2]).print(); // or tf.tile(a, [2])
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+           * a.tile([1, 2]).print(); // or tf.tile(a, [1, 2])
+ * ```
+ * @param x The tensor to tile.
+ * @param reps Determines the number of replications per dimension.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
+ function tile_(x, reps) {
+ const parseAs = null;
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tile", parseAs);
+ util["assert"](
+ $x.rank === reps.length,
+ () =>
+              `Error in tile: rank of input ${$x.rank} ` + `must match length of reps ${reps}.`
+ );
+ const forward = (backend, save) => {
+ const res = backend.tile($x, reps);
+ save([$x]);
+ return res;
+ };
+ const inputsToSave = [$x];
+ const inputs = { x: $x };
+ const attrs = { reps };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["rb" /* Tile */],
+ attrs,
+ inputsToSave
+ );
+ }
+ const tile = Object(operation["a" /* op */])({ tile_ });
+ //# sourceMappingURL=tile.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/FusedBatchNorm_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const fusedBatchNormGradConfig = {
+ kernelName: kernel_names["F" /* FusedBatchNorm */],
+ inputsToSave: ["x", "mean", "variance", "scale"],
+ gradFunc: (dy, saved, attrs) => {
+ const { varianceEpsilon } = attrs;
+ const [x, mean, variance, scale] = saved;
+ const scaleValue = scale == null ? Object(tensor_ops["e" /* scalar */])(1) : scale;
+ const reductionAxes = getReductionAxes(mean.shape, x.shape);
+ const tileShape = [];
+ if (mean.rank === 1) {
+ for (let i = 0; i < x.shape.length - 1; ++i) {
+ tileShape.push(x.shape[i]);
+ }
+ tileShape.push(1);
+ }
+ const xMinusMean = sub(x, mean);
+ const dyTimesScaleValue = mul(dy, scaleValue);
+ const oneOverSqrtVariance = rsqrt(
+ add(variance, Object(tensor_ops["e" /* scalar */])(varianceEpsilon))
+ );
+ const minusHalfRCube = mul(
+ mul(mul(oneOverSqrtVariance, oneOverSqrtVariance), oneOverSqrtVariance),
+ Object(tensor_ops["e" /* scalar */])(-0.5)
+ );
+ const derX = () => {
+ if (mean.rank === 1) {
+ return reshape(
+ mul(
+ mul(dy, tile(oneOverSqrtVariance.as4D(1, 1, 1, mean.shape[0]), tileShape)),
+ scaleValue
+ ),
+ x.shape
+ );
+ } else {
+ return reshape(mul(mul(dy, oneOverSqrtVariance), scaleValue), x.shape);
+ }
+ };
+ const derMean = () => {
+ let meanDer = mul(
+ mul(oneOverSqrtVariance, Object(tensor_ops["e" /* scalar */])(-1)),
+ dyTimesScaleValue
+ );
+ if (mean.rank === 1) {
+ meanDer = sum(meanDer, reductionAxes);
+ }
+ return reshape(meanDer, mean.shape);
+ };
+ const derVariance = () => {
+ let varianceDer = mul(mul(minusHalfRCube, xMinusMean), dyTimesScaleValue);
+ if (mean.rank === 1) {
+ varianceDer = sum(varianceDer, reductionAxes);
+ }
+ return reshape(varianceDer, mean.shape);
+ };
+ const derScale = () => {
+ const xMinusMean2TimesRsqrt = mul(xMinusMean, oneOverSqrtVariance);
+ let scaleDer = mul(dy, xMinusMean2TimesRsqrt);
+ if (mean.rank === 1) {
+ scaleDer = sum(scaleDer, reductionAxes);
+ }
+ return reshape(scaleDer, mean.shape);
+ };
+ const derOffset = () => {
+ let offsetDer = dy;
+ if (mean.rank === 1) {
+ offsetDer = sum(offsetDer, reductionAxes);
+ }
+ return reshape(offsetDer, mean.shape);
+ };
+ return {
+ x: derX,
+ mean: derMean,
+ variance: derVariance,
+ scale: derScale,
+ offset: derOffset,
+ };
+ },
+ };
+ //# sourceMappingURL=FusedBatchNorm_grad.js.map
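+          // Written out per channel (rank-1 mean/variance, eps = varianceEpsilon), the
+          // gradFunc above computes, treating mean and variance as independent inputs:
+          //   dx      = dy * scale / sqrt(variance + eps)
+          //   dmean   = sum(-dy * scale / sqrt(variance + eps))
+          //   dvar    = sum(dy * scale * (x - mean) * -0.5 * (variance + eps)^(-3/2))
+          //   dscale  = sum(dy * (x - mean) / sqrt(variance + eps))
+          //   doffset = sum(dy)
+          // where each sum runs over the batch/spatial reduction axes.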
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/GreaterEqual_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const greaterEqualGradConfig = {
+ kernelName: kernel_names["I" /* GreaterEqual */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ return {
+ a: () => Object(tensor_ops["o" /* zerosLike */])(a),
+ b: () => Object(tensor_ops["o" /* zerosLike */])(b),
+ };
+ },
+ };
+ //# sourceMappingURL=GreaterEqual_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Identity_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const identityGradConfig = {
+ kernelName: kernel_names["J" /* Identity */],
+ gradFunc: (dy) => {
+ return { x: () => dy.toFloat() };
+ },
+ };
+ //# sourceMappingURL=Identity_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization_backprop.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ function localResponseNormalizationBackprop_(
+ x,
+ y,
+ dy,
+ depthRadius = 5,
+ bias = 1,
+ alpha = 1,
+ beta = 0.5
+ ) {
+ const forward = (backend) => backend.LRNGrad(dy, x, y, depthRadius, bias, alpha, beta);
+ const inputs = { x, y, dy };
+ const attrs = { depthRadius, bias, alpha, beta };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["M" /* LRNBackprop */],
+ attrs
+ );
+ }
+ const localResponseNormalizationBackprop = Object(operation["a" /* op */])({
+ localResponseNormalizationBackprop_,
+ });
+ //# sourceMappingURL=local_response_normalization_backprop.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/LRN_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const lrnGradConfig = {
+ kernelName: kernel_names["L" /* LRN */],
+ inputsToSave: ["x"],
+ outputsToSave: [true],
+ gradFunc: (dy, saved, attrs) => {
+ const [x, y] = saved;
+ const { depthRadius, bias, alpha, beta } = attrs;
+ return {
+ x: () => localResponseNormalizationBackprop(x, y, dy, depthRadius, bias, alpha, beta),
+ };
+ },
+ };
+ //# sourceMappingURL=LRN_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Max_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const maxGradConfig = {
+ kernelName: kernel_names["P" /* Max */],
+ inputsToSave: ["x"],
+ outputsToSave: [true],
+ gradFunc: (dy, saved, attrs) => {
+ const maxAttrs = attrs;
+ const { reductionIndices } = maxAttrs;
+ const [x, y] = saved;
+ const origAxes = util["parseAxisParam"](reductionIndices, x.shape);
+ const permutedAxes = getAxesPermutation(origAxes, x.rank);
+ const maxGrad = gradForMinAndMax(dy, y, x, origAxes, permutedAxes);
+ return {
+ x: () => {
+ let out = maxGrad["x"]();
+ if (permutedAxes != null) {
+ out = transpose(out);
+ }
+ return out;
+ },
+ };
+ },
+ };
+ //# sourceMappingURL=Max_grad.js.map
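+          // gradForMinAndMax (defined earlier in this bundle) routes each dy value
+          // only to the input positions whose value equals the saved output y, so the
+          // max gradient is dy at the arg-max locations and zero everywhere else.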
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Returns the truth value of (a >= b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.greaterEqual(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function greaterEqual_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "greaterEqual");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "greaterEqual");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ const forward = (backend, save) => {
+ const res = backend.greaterEqual($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["I" /* GreaterEqual */]
+ );
+ }
+ const greaterEqual = Object(operation["a" /* op */])({ greaterEqual_ });
+ //# sourceMappingURL=greater_equal.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/less.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Returns the truth value of (a < b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.less(b).print();
+ * ```
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function less_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "less");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "less");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ const forward = (backend) => backend.less($a, $b);
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["N" /* Less */]
+ );
+ }
+ const less = Object(operation["a" /* op */])({ less_ });
+ //# sourceMappingURL=less.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Maximum_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const maximumGradConfig = {
+ kernelName: kernel_names["V" /* Maximum */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const derA = () => mul(dy, cast(greaterEqual(a, b), "float32"));
+ const derB = () => mul(dy, cast(less(a, b), "float32"));
+ return { a: derA, b: derB };
+ },
+ };
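+ /**
+ * Usage sketch (assumes the public `tf` namespace): dy flows to `a`
+ * where a >= b and to `b` where a < b, matching the two masks above.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 5]);
+ * const b = tf.tensor1d([3, 2]);
+ * const [da, db] = tf.grads((a, b) => tf.maximum(a, b))([a, b]);
+ * da.print(); // [0, 1]
+ * db.print(); // [1, 0]
+ * ```
+ */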
+ //# sourceMappingURL=Maximum_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d_backprop.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the backprop of a 3d max pool.
+ *
+ * @param dy The dy error, of rank 5 or rank 4 of shape
+ * [batchSize, depth, height, width, channels]. If rank 4, batch of 1 is
+ * assumed.
+ * @param input The original input image, of rank 5 or rank 4 of shape
+ * [batchSize, depth, height, width, channels].
+ * @param output The original output image, of rank 5 of shape
+ * [batchSize, outDepth, outHeight, outWidth, channels].
+ * @param filterSize The filter size:
+ * `[filterDepth, filterHeight, filterWidth]`. If `filterSize` is a
+ * single number, then `filterDepth == filterHeight == filterWidth`.
+ * @param strides The strides of the pooling:
+ * `[strideDepth, strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideDepth == strideHeight == strideWidth`.
+ * @param dilations Deprecated, this field will be gone in v3.0.0.
+ * The dilation rates: `[dilationDepth, dilationHeight, dilationWidth]`
+ * in which we sample input values across the depth, height and width
+ * dimensions in dilated pooling.
+ * Defaults to `[1, 1, 1]`. If `dilations` is a single number,
+ * then `dilationDepth == dilationHeight == dilationWidth`.
+ * If it is greater than 1, then all values of `strides` must be 1.
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
+ * rounding mode used when computing output dimensions if pad is a
+ * number. If none is provided, it will not round and error if the output
+ * is of fractional size.
+ */
+ function maxPool3dBackprop_(
+ dy,
+ input,
+ output,
+ filterSize,
+ strides,
+ dilations = [1, 1, 1],
+ pad,
+ dimRoundingMode
+ ) {
+ const $dy = Object(tensor_util_env["a" /* convertToTensor */])(
+ dy,
+ "dy",
+ "maxPool3dBackprop"
+ );
+ const $input = Object(tensor_util_env["a" /* convertToTensor */])(
+ input,
+ "input",
+ "maxPool3dBackprop"
+ );
+ const $output = Object(tensor_util_env["a" /* convertToTensor */])(
+ output,
+ "output",
+ "maxPool3dBackprop"
+ );
+ let dy5D = $dy;
+ let input5D = $input;
+ let output5D = $output;
+ let reshapedTo5D = false;
+ if ($input.rank === 4) {
+ reshapedTo5D = true;
+ dy5D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]);
+ input5D = reshape($input, [
+ 1,
+ $input.shape[0],
+ $input.shape[1],
+ $input.shape[2],
+ $input.shape[3],
+ ]);
+ output5D = reshape($output, [
+ 1,
+ $output.shape[0],
+ $output.shape[1],
+ $output.shape[2],
+ $output.shape[3],
+ ]);
+ }
+ util["assert"](
+ dy5D.rank === 5,
+ () => `Error in maxPool3dBackprop: dy must be rank 5 but got rank ` + `${dy5D.rank}.`
+ );
+ util["assert"](
+ input5D.rank === 5,
+ () =>
+ `Error in maxPool3dBackprop: input must be rank 5 but got rank ` + `${input5D.rank}.`
+ );
+ util["assert"](
+ output5D.rank === 5,
+ () =>
+ `Error in maxPool3dBackprop: output must be rank 5 but got rank ` + `${output5D.rank}.`
+ );
+ util["assert"](
+ eitherStridesOrDilationsAreOne(strides, dilations),
+ () =>
+ "Error in maxPool3dBackprop: Either strides or dilations " +
+ `must be 1. Got strides ${strides} and dilations '${dilations}'`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+ `Error in maxPool3dBackprop: pad must be an integer when ` +
+ `using dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const forward = (backend) => {
+ const convInfo = computePool3DInfo(
+ input5D.shape,
+ filterSize,
+ strides,
+ dilations,
+ pad,
+ dimRoundingMode
+ );
+ return backend.maxPool3dBackprop(dy5D, input5D, output5D, convInfo);
+ };
+ const inputs = { dy: dy5D, input: input5D, output: output5D };
+ const attrs = { filterSize, strides, dilations, pad, dimRoundingMode };
+ const res = engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["S" /* MaxPool3DBackprop */],
+ attrs
+ );
+ if (reshapedTo5D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
+ }
+ return res;
+ }
+ const maxPool3dBackprop = Object(operation["a" /* op */])({ maxPool3dBackprop_ });
+ //# sourceMappingURL=max_pool_3d_backprop.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool3D_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const maxPool3DGradConfig = {
+ kernelName: kernel_names["R" /* MaxPool3D */],
+ inputsToSave: ["x"],
+ outputsToSave: [true],
+ gradFunc: (dy, saved, attrs) => {
+ const [x, y] = saved;
+ const { filterSize, strides, dilations, pad, dimRoundingMode } = attrs;
+ const $dilations = dilations == null ? [1, 1, 1] : dilations;
+ return {
+ x: () =>
+ maxPool3dBackprop(dy, x, y, filterSize, strides, $dilations, pad, dimRoundingMode),
+ };
+ },
+ };
+ //# sourceMappingURL=MaxPool3D_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_backprop.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the backprop of a 2D max pool.
+ *
+ * @param dy The dy error, of rank 4 or rank 3 of shape
+ * [batchSize, height, width, channels]. If rank 3, batch of 1 is
+ * assumed.
+ * @param input The original input image, of rank 4, of shape
+ * [batchSize, height, width, channels].
+ * @param output The original output image, of rank 4, of shape
+ * [batchSize, outHeight, outWidth, channels].
+ * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
+ * rounding mode used when computing output dimensions if pad is a
+ * number. If none is provided, it will not round and error if the output
+ * is of fractional size.
+ */
+ function maxPoolBackprop_(dy, input, output, filterSize, strides, pad, dimRoundingMode) {
+ const $dy = Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "maxPoolBackprop");
+ const $input = Object(tensor_util_env["a" /* convertToTensor */])(
+ input,
+ "input",
+ "maxPoolBackprop"
+ );
+ const $output = Object(tensor_util_env["a" /* convertToTensor */])(
+ output,
+ "output",
+ "maxPoolBackprop"
+ );
+ util["assert"](
+ $input.rank === $dy.rank,
+ () => `Rank of input (${$input.rank}) does not match rank of dy ` + `(${$dy.rank})`
+ );
+ util["assert"](
+ $dy.rank === 4,
+ () => `Error in maxPoolBackprop: dy must be rank 4 but got rank ` + `${$dy.rank}.`
+ );
+ util["assert"](
+ $input.rank === 4,
+ () => `Error in maxPoolBackprop: input must be rank 4 but got rank ` + `${$input.rank}.`
+ );
+ if (dimRoundingMode != null) {
+ util["assert"](
+ util["isInt"](pad),
+ () =>
+ `Error in maxPoolBackprop: pad must be an integer when using ` +
+ `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
+ );
+ }
+ const forward = (backend) => {
+ const convInfo = computePool2DInfo(
+ $input.shape,
+ filterSize,
+ strides,
+ 1 /* dilations */,
+ pad,
+ dimRoundingMode
+ );
+ return backend.maxPoolBackprop($dy, $input, $output, convInfo);
+ };
+ const inputs = { dy: $dy, input: $input, output: $output };
+ const attrs = { filterSize, strides, pad, dimRoundingMode };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null,
+ kernel_names["T" /* MaxPoolBackprop */],
+ attrs
+ );
+ }
+ const maxPoolBackprop = Object(operation["a" /* op */])({ maxPoolBackprop_ });
+ //# sourceMappingURL=max_pool_backprop.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const maxPoolGradConfig = {
+ kernelName: kernel_names["Q" /* MaxPool */],
+ inputsToSave: ["x"],
+ outputsToSave: [true],
+ gradFunc: (dy, saved, attrs) => {
+ const [x, y] = saved;
+ const { filterSize, strides, pad } = attrs;
+ return {
+ x: () => maxPoolBackprop(dy, x, y, filterSize, strides, pad),
+ };
+ },
+ };
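+ /**
+ * Sketch of the dispatch path (assuming the public `tf` namespace): the
+ * MaxPool gradient sends dy back only to the input positions that won the
+ * pooling window.
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 4, 3], [1, 2, 2, 1]);
+ * const dx = tf.grad((x) => tf.maxPool(x, 2, 2, 'valid'))(x);
+ * dx.print(); // 1 only at the position of the pooled maximum (4)
+ * ```
+ */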
+ //# sourceMappingURL=MaxPool_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/greater.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Returns the truth value of (a > b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.greater(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function greater_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "greater");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "greater");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ const forward = (backend) => backend.greater($a, $b);
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["H" /* Greater */]
+ );
+ }
+ const greater = Object(operation["a" /* op */])({ greater_ });
+ //# sourceMappingURL=greater.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Returns the truth value of (a <= b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.lessEqual(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function lessEqual_(a, b) {
+ let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "lessEqual");
+ let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "lessEqual");
+ [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ const forward = (backend, save) => {
+ const res = backend.lessEqual($a, $b);
+ save([$a, $b]);
+ return res;
+ };
+ const inputs = { a: $a, b: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["O" /* LessEqual */]
+ );
+ }
+ const lessEqual = Object(operation["a" /* op */])({ lessEqual_ });
+ //# sourceMappingURL=less_equal.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Minimum_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const minimumGradConfig = {
+ kernelName: kernel_names["W" /* Minimum */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const derA = () => mul(dy, cast(lessEqual(a, b), "float32"));
+ const derB = () => mul(dy, cast(greater(a, b), "float32"));
+ return { a: derA, b: derB };
+ },
+ };
+ //# sourceMappingURL=Minimum_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Mod_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const modGradConfig = {
+ kernelName: kernel_names["X" /* Mod */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ return reshape(sum(dy, reduceAxes), a.shape);
+ }
+ return dy;
+ };
+ const derB = () => {
+ const res = mul(dy, neg(floor(div(a, b))));
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ return reshape(sum(res, reduceAxes), b.shape);
+ }
+ return res;
+ };
+ return { a: derA, b: derB };
+ },
+ };
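+ /**
+ * Worked example of the formulas above (assumes the public `tf` namespace):
+ * d(a mod b)/da = 1 and d(a mod b)/db = -floor(a / b), reduced over any
+ * broadcast axes.
+ *
+ * ```js
+ * const [da, db] = tf.grads((a, b) => tf.mod(a, b))([tf.scalar(7), tf.scalar(3)]);
+ * da.print(); // 1
+ * db.print(); // -2, i.e. -floor(7 / 3)
+ * ```
+ */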
+ //# sourceMappingURL=Mod_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Multiply_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const multiplyGradConfig = {
+ kernelName: kernel_names["Y" /* Multiply */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ const res = mul(dy, cast(b, "float32"));
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ return reshape(sum(res, reduceAxes), a.shape);
+ }
+ return res;
+ };
+ const derB = () => {
+ const res = mul(dy, cast(a, "float32"));
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ return reshape(sum(res, reduceAxes), b.shape);
+ }
+ return res;
+ };
+ return { a: derA, b: derB };
+ },
+ };
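+ /**
+ * Sketch of the product rule with broadcasting (assumes the public `tf`
+ * namespace): each gradient is dy times the other input, then summed back
+ * to the original shape over the broadcast axes.
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 2], [3, 4]]);
+ * const b = tf.scalar(10);
+ * const [da, db] = tf.grads((a, b) => tf.mul(a, b))([a, b]);
+ * da.print(); // [[10, 10], [10, 10]]
+ * db.print(); // 10  (1 + 2 + 3 + 4, reduced to b's scalar shape)
+ * ```
+ */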
+ //# sourceMappingURL=Multiply_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/OneHot_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const oneHotGradConfig = {
+ kernelName: kernel_names["cb" /* OneHot */],
+ inputsToSave: ["indices"],
+ gradFunc: (dy, saved) => {
+ const indices = saved[0];
+ return { indices: () => Object(tensor_ops["n" /* zeros */])(indices.shape, "float32") };
+ },
+ };
+ //# sourceMappingURL=OneHot_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/PadV2_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const padV2GradConfig = {
+ kernelName: kernel_names["db" /* PadV2 */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved, attrs) => {
+ // Pad introduces values around the original tensor, so the gradient
+ // slices the original shape out of the gradient.
+ const x = saved[0];
+ const { paddings } = attrs;
+ const begin = paddings.map((p) => p[0]);
+ return { x: () => dy.slice(begin, x.shape) };
+ },
+ };
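+ /**
+ * Quick sketch (assumes the public `tf` namespace): the pad gradient simply
+ * slices the un-padded region back out of dy.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ * tf.grad((x) => tf.pad(x, [[2, 1]]))(x).print(); // [1, 1, 1]
+ * ```
+ */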
+ //# sourceMappingURL=PadV2_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /** An implementation of the Where kernel shared between cpu and webgl */
+
+ function whereImpl(condShape, condVals) {
+ const indices = [];
+ for (let i = 0; i < condVals.length; i++) {
+ if (condVals[i]) {
+ indices.push(i);
+ }
+ }
+ const inBuffer = array_ops_buffer(condShape, "int32");
+ const out = array_ops_buffer([indices.length, condShape.length], "int32");
+ for (let i = 0; i < indices.length; i++) {
+ const loc = inBuffer.indexToLoc(indices[i]);
+ const offset = i * condShape.length;
+ out.values.set(loc, offset);
+ }
+ return out.toTensor();
+ }
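+ /**
+ * Illustrative call, assuming condVals is the raw backing data of the
+ * condition tensor (a TypedArray or plain array works for this loop):
+ *
+ * ```js
+ * // True values sit at flat indices 1 and 2 of a [2, 2] tensor, so the
+ * // result is the int32 coordinate tensor [[0, 1], [1, 0]].
+ * const coords = whereImpl([2, 2], [0, 1, 1, 0]);
+ * ```
+ */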
+ //# sourceMappingURL=where_impl.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/logical_ops.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Returns the truth value of `NOT x` element-wise.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, true], 'bool');
+ *
+ * a.logicalNot().print();
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype 'bool'.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function logicalNot_(x) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "logicalNot", "bool");
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.logicalNot($x), { $x });
+ }
+ /**
+ * Returns the truth value of `a AND b` element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, false, true, true], 'bool');
+ * const b = tf.tensor1d([false, true, false, true], 'bool');
+ *
+ * a.logicalAnd(b).print();
+ * ```
+ *
+ * @param a The first input tensor. Must be of dtype bool.
+ * @param b The second input tensor. Must be of dtype bool.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function logicalAnd_(a, b) {
+ const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "logicalAnd", "bool");
+ const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "logicalAnd", "bool");
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.logicalAnd($a, $b),
+ { a: $a, b: $b },
+ null /* grad */,
+ "LogicalAnd"
+ );
+ }
+ /**
+ * Returns the truth value of `a OR b` element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, false, true, true], 'bool');
+ * const b = tf.tensor1d([false, true, false, true], 'bool');
+ *
+ * a.logicalOr(b).print();
+ * ```
+ * @param a The first input tensor. Must be of dtype bool.
+ * @param b The second input tensor. Must be of dtype bool.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function logicalOr_(a, b) {
+ const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "logicalOr", "bool");
+ const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "logicalOr", "bool");
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.logicalOr($a, $b), {
+ $a,
+ $b,
+ });
+ }
+ /**
+ * Returns the truth value of `a XOR b` element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, false, true, true], 'bool');
+ * const b = tf.tensor1d([false, true, false, true], 'bool');
+ *
+ * a.logicalXor(b).print();
+ * ```
+ *
+ * @param a The first input tensor. Must be of dtype bool.
+ * @param b The second input tensor. Must be of dtype bool.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function logicalXor_(a, b) {
+ const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "logicalXor", "bool");
+ const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "logicalXor", "bool");
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ // x ^ y = (x | y) & ~(x & y)
+ return logicalOr(a, b).logicalAnd(logicalAnd(a, b).logicalNot());
+ }
+ /**
+ * Returns the elements, either `a` or `b` depending on the `condition`.
+ *
+ * If the condition is true, select from `a`, otherwise select from `b`.
+ *
+ * ```js
+ * const cond = tf.tensor1d([false, false, true], 'bool');
+ * const a = tf.tensor1d([1 , 2, 3]);
+ * const b = tf.tensor1d([-1, -2, -3]);
+ *
+ * a.where(cond, b).print();
+ * ```
+ *
+ * @param condition The input condition. Must be of dtype bool.
+ * @param a If `condition` is rank 1, `a` may have a higher rank but
+ * its first dimension must match the size of `condition`.
+ * @param b A tensor with the same shape and type as `a`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ function where_(condition, a, b) {
+ const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "where");
+ const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "where");
+ const $condition = Object(tensor_util_env["a" /* convertToTensor */])(
+ condition,
+ "condition",
+ "where",
+ "bool"
+ );
+ Object(util["assertShapesMatch"])($a.shape, $b.shape, "Error in where: ");
+ if ($condition.rank === 1) {
+ // If condition rank is 1, then the first dimension must match the size of
+ // condition.
+ Object(util["assert"])(
+ $condition.shape[0] === $a.shape[0],
+ () => "The first dimension of `a` must match the size of `condition`."
+ );
+ } else {
+ // A must have the same shape as condition.
+ Object(util["assertShapesMatch"])($condition.shape, $b.shape, "Error in where: ");
+ }
+ // TODO(julianoks): Return null for condition gradient
+ // when backprop supports it.
+ const grad = (dy, saved) => {
+ const [$condition] = saved;
+ return {
+ condition: () => Object(tensor_ops["o" /* zerosLike */])($condition).toFloat(),
+ t: () => dy.mul($condition.cast(dy.dtype)),
+ e: () => dy.mul($condition.logicalNot().cast(dy.dtype)),
+ };
+ };
+ const inputs = { condition: $condition, t: $a, e: $b };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend, save) => {
+ const res = backend.select($condition, $a, $b);
+ save([$condition]);
+ return res;
+ },
+ inputs,
+ grad,
+ kernel_names["kb" /* SelectV2 */]
+ );
+ }
+ /**
+ * Returns the coordinates of true elements of condition.
+ *
+ * The coordinates are returned in a 2-D tensor where the first dimension (rows)
+ * represents the number of true elements, and the second dimension (columns)
+ * represents the coordinates of the true elements. Keep in mind, the shape of
+ * the output tensor can vary depending on how many true values there are in
+ * input. Indices are output in row-major order. The resulting tensor has the
+ * shape `[numTrueElems, condition.rank]`.
+ *
+ * This is analogous to calling the python `tf.where(cond)` without an x or y.
+ *
+ * ```js
+ * const cond = tf.tensor1d([false, false, true], 'bool');
+ * const result = await tf.whereAsync(cond);
+ * result.print();
+ * ```
+ */
+ /** @doc {heading: 'Operations', subheading: 'Logical'} */
+ async function whereAsync_(condition) {
+ const $condition = Object(tensor_util_env["a" /* convertToTensor */])(
+ condition,
+ "condition",
+ "whereAsync",
+ "bool"
+ );
+ const vals = await $condition.data();
+ const res = whereImpl($condition.shape, vals);
+ if (condition !== $condition) {
+ $condition.dispose();
+ }
+ return res;
+ }
+ const logicalAnd = Object(operation["a" /* op */])({ logicalAnd_ });
+ const logicalNot = Object(operation["a" /* op */])({ logicalNot_ });
+ const logicalOr = Object(operation["a" /* op */])({ logicalOr_ });
+ const logicalXor = Object(operation["a" /* op */])({ logicalXor_ });
+ const where = Object(operation["a" /* op */])({ where_ });
+ const whereAsync = whereAsync_;
+ //# sourceMappingURL=logical_ops.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pow.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the power of one `tf.Tensor` to another. Supports broadcasting.
+ *
+ * Given a `tf.Tensor` x and a `tf.Tensor` y, this operation computes x^y for
+ * corresponding elements in x and y. The result's dtype will be the upcasted
+ * type of the `base` and `exp` dtypes.
+ *
+ * ```js
+ * const a = tf.tensor([[2, 3], [4, 5]])
+ * const b = tf.tensor([[1, 2], [3, 0]]).toInt();
+ *
+ * a.pow(b).print(); // or tf.pow(a, b)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor([[1, 2], [3, 4]])
+ * const b = tf.tensor(2).toInt();
+ *
+ * a.pow(b).print(); // or tf.pow(a, b)
+ * ```
+ * We also expose `powStrict` which has the same signature as this op and
+ * asserts that `base` and `exp` are the same shape (does not broadcast).
+ *
+ * @param base The base `tf.Tensor` to pow element-wise.
+ * @param exp The exponent `tf.Tensor` to pow element-wise.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
+ function pow_(base, exp) {
+ let $base = Object(tensor_util_env["a" /* convertToTensor */])(base, "base", "pow");
+ let $exp = Object(tensor_util_env["a" /* convertToTensor */])(exp, "exp", "pow");
+ [$base, $exp] = Object(tensor_util["makeTypesMatch"])($base, $exp);
+ const inputs = { a: $base, b: $exp };
+ const forward = (backend, save) => {
+ const y = backend.pow($base, $exp);
+ save([$base, $exp, y]);
+ return y;
+ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["fb" /* Pow */]
+ );
+ }
+ const pow = Object(operation["a" /* op */])({ pow_ });
+ //# sourceMappingURL=pow.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Pow_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const powGradConfig = {
+ kernelName: kernel_names["fb" /* Pow */],
+ inputsToSave: ["a", "b"],
+ outputsToSave: [true],
+ gradFunc: (dy, saved) => {
+ const [a, b, y] = saved;
+ const base = a;
+ const exp = b;
+ const outShape = assertAndGetBroadcastShape(base.shape, exp.shape);
+ const derBase = () => {
+ const expFloat = cast(exp, "float32");
+ let res = mul(
+ dy,
+ mul(expFloat, pow(base, sub(expFloat, Object(tensor_ops["e" /* scalar */])(1))))
+ );
+ const reduceAxes = getReductionAxes(base.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(res, base.shape);
+ };
+ const derExp = () => {
+ const condition = greater(base, 0);
+ const logBase = where(
+ condition,
+ log(base),
+ Object(tensor_ops["o" /* zerosLike */])(base)
+ );
+ let res = mul(dy, mul(y, logBase));
+ const reduceAxes = getReductionAxes(exp.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(res, exp.shape);
+ };
+ return { a: derBase, b: derExp };
+ },
+ };
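+ /**
+ * Worked instance of the two derivatives above (assumes the public `tf`
+ * namespace): d(a^b)/da = b * a^(b-1) and d(a^b)/db = a^b * ln(a), with the
+ * log masked to zero where a <= 0.
+ *
+ * ```js
+ * const [da, db] = tf.grads((a, b) => tf.pow(a, b))([tf.scalar(3), tf.scalar(2)]);
+ * da.print(); // 6        (2 * 3^1)
+ * db.print(); // ~9.8875  (9 * ln 3)
+ * ```
+ */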
+ //# sourceMappingURL=Pow_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Prelu_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const preluGradConfig = {
+ kernelName: kernel_names["gb" /* Prelu */],
+ inputsToSave: ["x", "alpha"],
+ gradFunc: (dy, saved) => {
+ const [x, alpha] = saved;
+ const mask = greater(x, 0);
+ return {
+ x: () => where(mask, dy, mul(dy, alpha)),
+ alpha: () => {
+ let res = where(mask, Object(tensor_ops["o" /* zerosLike */])(dy), mul(dy, x));
+ const reduceAxes = getReductionAxes(alpha.shape, dy.shape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(res, alpha.shape);
+ },
+ };
+ },
+ };
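+ /**
+ * Sketch of the PReLU gradients (assumes the public `tf` namespace): dx is
+ * dy where x > 0 and dy * alpha otherwise; dAlpha collects dy * x over the
+ * non-positive region, reduced to alpha's shape.
+ *
+ * ```js
+ * const x = tf.tensor1d([-2, 3]);
+ * const alpha = tf.scalar(0.1);
+ * const [dx, dAlpha] = tf.grads((x, alpha) => tf.prelu(x, alpha))([x, alpha]);
+ * dx.print();     // [0.1, 1]
+ * dAlpha.print(); // -2
+ * ```
+ */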
+ //# sourceMappingURL=Prelu_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Relu6_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const relu6GradConfig = {
+ kernelName: kernel_names["jb" /* Relu6 */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved) => {
+ const [x] = saved;
+ const mask = mul(lessEqual(x, 6), unary_ops_step(x));
+ return { x: () => mul(dy, cast(mask, "float32")) };
+ },
+ };
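+ /**
+ * Sketch (assuming the bundled version exposes `tf.relu6`): the gradient is
+ * dy masked to the interval where the activation is neither clipped at 0
+ * nor at 6.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 3, 8]);
+ * tf.grad((x) => tf.relu6(x))(x).print(); // [0, 1, 0]
+ * ```
+ */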
+ //# sourceMappingURL=Relu6_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Relu_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const reluGradConfig = {
+ kernelName: kernel_names["ib" /* Relu */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved) => {
+ const [x] = saved;
+ return { x: () => mul(dy, cast(unary_ops_step(x), "float32")) };
+ },
+ };
+ //# sourceMappingURL=Relu_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ const SELU_SCALEALPHA = 1.7580993408473768599402175208123;
+ const SELU_SCALE = 1.0507009873554804934193349852946;
+ //# sourceMappingURL=selu_util.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Selu_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const seluGradConfig = {
+ kernelName: kernel_names["lb" /* Selu */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved) => {
+ const [x] = saved;
+ return {
+ x: () => {
+ const mask = greater(x, Object(tensor_ops["e" /* scalar */])(0));
+ const scaleAlpha = Object(tensor_ops["e" /* scalar */])(SELU_SCALEALPHA);
+ const scale = Object(tensor_ops["e" /* scalar */])(SELU_SCALE);
+ const greaterThanZeroDer = mul(dy, scale);
+ const lessEqualZeroDer = mul(mul(dy, scaleAlpha), unary_ops_exp(cast(x, "float32")));
+ return where(mask, greaterThanZeroDer, lessEqualZeroDer);
+ },
+ };
+ },
+ };
+ //# sourceMappingURL=Selu_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
+ * shape `blockShape + [batch]`, interleaves these blocks back into the grid
+ * defined by the spatial dimensions `[1, ..., M]`, to obtain a result with
+ * the same rank as the input. The spatial dimensions of this intermediate
+ * result are then optionally cropped according to `crops` to produce the
+ * output. This is the reverse of `tf.spaceToBatchND`. See below for a precise
+ * description.
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]);
+ * const blockShape = [2, 2];
+ * const crops = [[0, 0], [0, 0]];
+ *
+ * x.batchToSpaceND(blockShape, crops).print();
+ * ```
+ *
+ * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
+ * remainingShape`, where spatialShape has `M` dimensions.
+ * @param blockShape A 1-D array. Must have shape `[M]`, all values must
+ * be >= 1.
+ * @param crops A 2-D array. Must have shape `[M, 2]`, all values must be >= 0.
+ * `crops[i] = [cropStart, cropEnd]` specifies the amount to crop from input
+ * dimension `i + 1`, which corresponds to spatial dimension `i`. It is required
+ * that `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]`
+ *
+ * This operation is equivalent to the following steps:
+ *
+ * 1. Reshape `x` to `reshaped` of shape: `[blockShape[0], ...,
+ * blockShape[M-1], batch / prod(blockShape), x.shape[1], ...,
+ * x.shape[N-1]]`
+ *
+ * 2. Permute dimensions of `reshaped` to produce `permuted` of shape `[batch /
+ * prod(blockShape), x.shape[1], blockShape[0], ..., x.shape[M],
+ * blockShape[M-1], x.shape[M+1], ..., x.shape[N-1]]`
+ *
+ * 3. Reshape `permuted` to produce `reshapedPermuted` of shape `[batch /
+ * prod(blockShape), x.shape[1] * blockShape[0], ..., x.shape[M] *
+ * blockShape[M-1], x.shape[M+1], ..., x.shape[N-1]]`
+ *
+ * 4. Crop the start and end of dimensions `[1, ..., M]` of `reshapedPermuted`
+ * according to `crops` to produce the output of shape: `[batch /
+ * prod(blockShape), x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1],
+ * ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] -
+ * crops[M-1,1], x.shape[M+1], ..., x.shape[N-1]]`
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function batchToSpaceND_(x, blockShape, crops) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "batchToSpaceND");
+ const prod = blockShape.reduce((a, b) => a * b);
+ util["assert"](
+ $x.rank >= 1 + blockShape.length,
+ () =>
+ `input rank is ${$x.rank} but should be > blockShape.length ${blockShape.length}`
+ );
+ util["assert"](
+ crops.length === blockShape.length,
+ () =>
+ `crops.length is ${crops.length} but should be equal to blockShape.length ${blockShape.length}`
+ );
+ util["assert"](
+ $x.shape[0] % prod === 0,
+ () =>
+ `input tensor batch is ${$x.shape[0]} but is not divisible by the product of ` +
+ `the elements of blockShape ${blockShape.join(" * ")} === ${prod}`
+ );
+ const forward = (backend) => {
+ return backend.batchToSpaceND($x, blockShape, crops);
+ };
+ const inputs = { x: $x };
+ const attrs = { blockShape, crops };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* gradient */,
+ kernel_names["i" /* BatchToSpaceND */],
+ attrs
+ );
+ }
+ const batchToSpaceND = Object(operation["a" /* op */])({ batchToSpaceND_ });
+ //# sourceMappingURL=batch_to_space_nd.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/SpaceToBatchND_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const spaceToBatchNDGradConfig = {
+ kernelName: kernel_names["mb" /* SpaceToBatchND */],
+ gradFunc: (dy, saved, attrs) => {
+ const { blockShape, paddings } = attrs;
+ return { x: () => batchToSpaceND(dy, blockShape, paddings) };
+ },
+ };
+ //# sourceMappingURL=SpaceToBatchND_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/SplitV_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const splitVGradConfig = {
+ kernelName: kernel_names["nb" /* SplitV */],
+ gradFunc: (dy, saved, attrs) => {
+ const { axis } = attrs;
+ return { x: () => concat(dy, axis) };
+ },
+ };
+ //# sourceMappingURL=SplitV_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Square_grad.js
+ /**
+ * @license
+ * Copyright 2019 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const squareGradConfig = {
+ kernelName: kernel_names["ob" /* Square */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved) => {
+ const [x] = saved;
+ return { x: () => mul(dy, mul(x.toFloat(), 2)) };
+ },
+ };
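+ /**
+ * Minimal check of d(x^2)/dx = 2x (assumes the public `tf` namespace):
+ *
+ * ```js
+ * tf.grad((x) => tf.square(x))(tf.tensor1d([2, -3])).print(); // [4, -6]
+ * ```
+ */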
+ //# sourceMappingURL=Square_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/SquaredDifference_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const squaredDifferenceGradConfig = {
+ kernelName: kernel_names["pb" /* SquaredDifference */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const two = Object(tensor_ops["e" /* scalar */])(2);
+ const derA = () => mul(dy, mul(two, sub(a, b)));
+ const derB = () => mul(dy, mul(two, sub(b, a)));
+ return { a: derA, b: derB };
+ },
+ };
+ //# sourceMappingURL=SquaredDifference_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Sub_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const subGradConfig = {
+ kernelName: kernel_names["qb" /* Sub */],
+ inputsToSave: ["a", "b"],
+ gradFunc: (dy, saved) => {
+ const [a, b] = saved;
+ const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
+ const derA = () => {
+ let res = dy;
+ const reduceAxes = getReductionAxes(a.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(res, a.shape);
+ };
+ const derB = () => {
+ let res = dy;
+ const reduceAxes = getReductionAxes(b.shape, outShape);
+ if (reduceAxes.length > 0) {
+ res = sum(res, reduceAxes);
+ }
+ return reshape(neg(res), b.shape);
+ };
+ return { a: derA, b: derB };
+ },
+ };
+ //# sourceMappingURL=Sub_grad.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Pads a `tf.Tensor` with a given value and paddings.
+ *
+ * This operation currently only implements the `CONSTANT` mode.
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that `paddings` is of given length.
+ * - `tf.pad1d`
+ * - `tf.pad2d`
+ * - `tf.pad3d`
+ * - `tf.pad4d`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * x.pad([[1, 2]]).print();
+ * ```
+ * @param x The tensor to pad.
+ * @param paddings An array of length `R` (the rank of the tensor), where
+ * each element is a length-2 tuple of ints `[padBefore, padAfter]`,
+ * specifying how much to pad along each dimension of the tensor.
+ * @param constantValue The pad value to use. Defaults to 0.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function pad_(x, paddings, constantValue = 0) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "pad");
+ if ($x.rank === 0) {
+ throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");
+ }
+ const forward = (backend, save) => {
+ save([$x]);
+ return backend.pad($x, paddings, constantValue);
+ };
+ const attrs = { paddings, constantValue };
+ const inputs = { x: $x };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["db" /* PadV2 */],
+ attrs
+ );
+ }
+ const pad_pad = Object(operation["a" /* op */])({ pad_ });
+ //# sourceMappingURL=pad.js.map
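+ // A small usage sketch for the padding op wrapped above (tf.pad in the public API);
+ // values are illustrative only and not executed by this bundle:
+ //   const x = tf.tensor2d([[1, 2], [3, 4]]);
+ //   // Pad one row before and after dim 0, one column after dim 1, filling with 9.
+ //   tf.pad(x, [[1, 1], [0, 1]], 9).print();
+ //   // [[9, 9, 9], [1, 2, 9], [3, 4, 9], [9, 9, 9]]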
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js
+ /**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ function assertParamsValid(input, begin, size) {
+ util["assert"](
+ input.rank === begin.length,
+ () =>
+ `Error in slice${input.rank}D: Length of begin ${begin} must ` +
+ `match the rank of the array (${input.rank}).`
+ );
+ util["assert"](
+ input.rank === size.length,
+ () =>
+ `Error in slice${input.rank}D: Length of size ${size} must ` +
+ `match the rank of the array (${input.rank}).`
+ );
+ for (let i = 0; i < input.rank; ++i) {
+ util["assert"](
+ begin[i] + size[i] <= input.shape[i],
+ () =>
+ `Error in slice${input.rank}D: begin[${i}] + size[${i}] ` +
+ `(${begin[i] + size[i]}) would overflow input.shape[${i}] (${input.shape[i]})`
+ );
+ }
+ }
+ /** Converts a binary mask to an array of axes. Used in stridedSlice(). */
+ function maskToAxes(mask) {
+ const axes = [];
+ let axis = 0;
+ while (mask > 0) {
+ if (mask & 1) {
+ axes.push(axis);
+ }
+ mask /= 2;
+ axis++;
+ }
+ return axes;
+ }
+ /** Computes the output shape given the strided slice params. */
+ function slice_util_computeOutShape(begin, end, strides) {
+ const size = [];
+ for (let axis = 0; axis < begin.length; axis++) {
+ size[axis] = Math.ceil((end[axis] - begin[axis]) / strides[axis]);
+ }
+ return size;
+ }
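+ // Illustrative traces for the two helpers above (not executed by this bundle):
+ //   maskToAxes(5) -> [0, 2]  // binary 101: bits 0 and 2 are set
+ //   slice_util_computeOutShape([0, 1], [4, 7], [1, 2]) -> [4, 3]
+ //   // ceil((4 - 0) / 1) = 4 and ceil((7 - 1) / 2) = 3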
+ // Creates full selection at the elided dimensions. If the dimension matches
+ // the ellipsis mask, override the current stride value. Otherwise, insert.
+ function stridesWithElidedDims(strides, ellipsisInsertionIndex, numElidedAxes) {
+ const newStrides = [...strides];
+ for (let i = 0; i < numElidedAxes; i++) {
+ if (i === 0) {
+ newStrides[ellipsisInsertionIndex] = 1;
+ } else {
+ newStrides.splice(
+ ellipsisInsertionIndex,
+ 0 /* num elements to delete */,
+ 1 /* element to add */
+ );
+ newStrides.pop();
+ }
+ }
+ return newStrides;
+ }
+ // Creates full selection at the elided dimensions. If the dimension matches
+ // the ellipsis mask, override the current start value. Otherwise, insert.
+ function startIndicesWithElidedDims(startIndices, ellipsisInsertionIndex, numElidedAxes) {
+ const newIndices = [...startIndices];
+ for (let i = 0; i < numElidedAxes; i++) {
+ if (i === 0) {
+ newIndices[ellipsisInsertionIndex] = 0;
+ } else {
+ newIndices.splice(
+ ellipsisInsertionIndex,
+ 0 /* num elements to delete */,
+ 0 /* element to add */
+ );
+ newIndices.pop();
+ }
+ }
+ return newIndices;
+ }
+ // Creates full selection at the elided dimensions. If the dimension matches
+ // the ellipsis mask, override the current stop value. Otherwise, insert.
+ function stopIndicesWithElidedDims(
+ stopIndices,
+ ellipsisInsertionIndex,
+ numElidedAxes,
+ inputShape
+ ) {
+ const newIndices = [...stopIndices];
+ for (let i = 0; i < numElidedAxes; i++) {
+ if (i === 0) {
+ newIndices[ellipsisInsertionIndex] = Number.MAX_SAFE_INTEGER;
+ } else {
+ newIndices.splice(
+ ellipsisInsertionIndex,
+ 0 /* num elements to delete */,
+ Number.MAX_SAFE_INTEGER /* element to add */
+ );
+ newIndices.pop();
+ }
+ }
+ for (let i = 0; i < newIndices.length; i++) {
+ newIndices[i] = util["clamp"](0, newIndices[i], inputShape[i]);
+ }
+ return newIndices;
+ }
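+ // Illustrative trace for the elided-dimension helpers above, assuming an ellipsis that
+ // expands to two axes starting at index 1 (not executed by this bundle):
+ //   stridesWithElidedDims([2, 3, 4], 1, 2)      -> [2, 1, 1]
+ //   startIndicesWithElidedDims([2, 3, 4], 1, 2) -> [2, 0, 0]
+ //   // The elided axes get the full-selection stride 1 / start 0, while the array
+ //   // length is preserved (the trailing entry is dropped by the pop()).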
+ function stridesForAxis(strides, axis, ellipsisMask) {
+ let stride = strides[axis];
+ if (ellipsisMask & (1 << axis) || stride == null) {
+ stride = 1;
+ }
+ return stride;
+ }
+ function startForAxis(beginMask, startIndices, strides, inputShape, axis, ellipsisMask) {
+ // Begin with the specified index
+ let start = startIndices[axis];
+ const stride = strides[axis] || 1;
+ // Check the axis bit from right of masked axes, or the begin index is not set
+ // for the axis.
+ if (beginMask & (1 << axis) || ellipsisMask & (1 << axis) || start == null) {
+ if (stride > 0) {
+ // Forward iteration - use the first element. These values will get
+ // clamped below (Note: We could have set them to 0 and axis_size-1, but
+ // use lowest() and max() to maintain symmetry with StopForAxis())
+ start = Number.MIN_SAFE_INTEGER;
+ } else {
+ // Backward iteration - use the last element.
+ start = Number.MAX_SAFE_INTEGER;
+ }
+ }
+ // Handle negative indices
+ const axisSize = inputShape[axis];
+ if (start < 0) {
+ start += axisSize;
+ }
+ // Clamping
+ start = util["clamp"](0, start, axisSize - 1);
+ return start;
+ }
+ function stopForAxis(endMask, stopIndices, strides, inputShape, axis, ellipsisMask) {
+ // Begin with the specified index
+ let stop = stopIndices[axis];
+ const stride = strides[axis] || 1;
+ // Check the axis bit from right of masked axes, or if the stop index is not
+ // set for this axis.
+ if (endMask & (1 << axis) || ellipsisMask & (1 << axis) || stop == null) {
+ if (stride > 0) {
+ // Forward iteration - use the last element. These values will get
+ // clamped below
+ stop = Number.MAX_SAFE_INTEGER;
+ } else {
+ // Backward iteration - use the first element.
+ stop = Number.MIN_SAFE_INTEGER;
+ }
+ }
+ // Handle negative indices
+ const axisSize = inputShape[axis];
+ if (stop < 0) {
+ stop += axisSize;
+ }
+ // Clamping
+ // Because the end index points one past the last element, we need slightly
+ // different clamping ranges depending on the direction.
+ if (stride > 0) {
+ // Forward iteration
+ stop = util["clamp"](0, stop, axisSize);
+ } else {
+ // Backward iteration
+ stop = util["clamp"](-1, stop, axisSize - 1);
+ }
+ return stop;
+ }
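+ // A hedged worked example for the mask handling above (not executed by this bundle):
+ // for a 1-D input of size 5 with stride +1 and the mask bit set for axis 0,
+ //   startForAxis(1, [2], [1], [5], 0, 0) -> 0  // mask overrides begin, clamped to 0
+ //   stopForAxis(1, [3], [1], [5], 0, 0)  -> 5  // mask overrides end, clamped to size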
+ /**
+ * Returns true if the slice occupies a continuous set of elements in the
+ * 'flat' space.
+ */
+ function isSliceContinous(shape, begin, size) {
+ // Index of the first axis that has size > 1.
+ let firstNonOneAxis = size.length;
+ for (let i = 0; i < size.length; i++) {
+ if (size[i] > 1) {
+ firstNonOneAxis = i;
+ break;
+ }
+ }
+ for (let i = firstNonOneAxis + 1; i < size.length; i++) {
+ if (begin[i] > 0 || size[i] !== shape[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ function computeFlatOffset(begin, strides) {
+ let flatOffset = begin.length > 0 ? begin[begin.length - 1] : 1;
+ for (let i = 0; i < begin.length - 1; i++) {
+ flatOffset += begin[i] * strides[i];
+ }
+ return flatOffset;
+ }
+ //# sourceMappingURL=slice_util.js.map
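+ // Illustrative traces for isSliceContinous and computeFlatOffset above, assuming
+ // row-major strides [5, 1] for a [4, 5] tensor (not executed by this bundle):
+ //   isSliceContinous([4, 5], [1, 0], [2, 5]) -> true   // whole rows are contiguous
+ //   isSliceContinous([4, 5], [0, 1], [4, 2]) -> false  // a column block is not
+ //   computeFlatOffset([1, 0], [5, 1])        -> 5      // 1 * 5 + 0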
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Extracts a 1D slice from 1D array starting at coordinates `begin` and is
+ * of length `size`. See `slice` for details.
+ */
+ function slice1d_(x, begin, size) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice1d");
+ util["assert"](
+ $x.rank === 1,
+ () => `slice1d expects a rank-1 tensor, but got a rank-${$x.rank} tensor`
+ );
+ return slice($x, [begin], [size]);
+ }
+ /**
+ * Extracts a 2D slice from a 2D array starting at coordinates `begin` and
+ * is of size `size`. See `slice` for details.
+ */
+ function slice2d_(x, begin, size) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice2d");
+ util["assert"](
+ $x.rank === 2,
+ () => `slice2d expects a rank-2 tensor, but got a rank-${$x.rank} tensor`
+ );
+ return slice($x, begin, size);
+ }
+ /**
+ * Extracts a 3D slice from a 3D array starting at coordinates `begin` and
+ * is of size `size`. See `slice` for details.
+ */
+ function slice3d_(x, begin, size) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice3d");
+ util["assert"](
+ $x.rank === 3,
+ () => `slice3d expects a rank-3 tensor, but got a rank-${$x.rank} tensor`
+ );
+ return slice($x, begin, size);
+ }
+ /**
+ * Extracts a 4D slice from a 4D array starting at coordinates `begin` and
+ * is of size `size`. See `slice` for details.
+ */
+ function slice4d_(x, begin, size) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice4d");
+ util["assert"](
+ $x.rank === 4,
+ () => `slice4d expects a rank-4 tensor, but got a rank-${$x.rank} tensor`
+ );
+ return slice($x, begin, size);
+ }
+ /**
+ * Extracts a slice from a `tf.Tensor` starting at coordinates `begin`
+ * and is of size `size`.
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that `x` is of the given rank:
+ * - `tf.slice1d`
+ * - `tf.slice2d`
+ * - `tf.slice3d`
+ * - `tf.slice4d`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.slice([1], [2]).print();
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * x.slice([1, 0], [1, 2]).print();
+ * ```
+ * @param x The input `tf.Tensor` to slice from.
+ * @param begin The coordinates to start the slice from. The length can be
+ * less than the rank of x - the rest of the axes will have implicit 0 as
+ * start. Can also be a single number, in which case it specifies the
+ * first axis.
+ * @param size The size of the slice. The length can be less than the rank of
+ * x - the rest of the axes will have implicit -1. A value of -1 requests
+ * the rest of the dimensions in the axis. Can also be a single number,
+ * in which case it specifies the size of the first axis.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
+ function slice_(x, begin, size) {
+ const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice");
+ if ($x.rank === 0) {
+ throw new Error("Slicing scalar is not possible");
+ }
+ // The following logic allows for more ergonomic calls.
+ let begin_;
+ if (typeof begin === "number") {
+ begin_ = [begin, ...new Array($x.rank - 1).fill(0)];
+ } else if (begin.length < $x.rank) {
+ begin_ = begin.concat(new Array($x.rank - begin.length).fill(0));
+ } else {
+ begin_ = begin.slice();
+ }
+ begin_.forEach((d) => {
+ util["assert"](d !== -1, () => "slice() does not support negative begin indexing.");
+ });
+ let size_;
+ if (size == null) {
+ size_ = new Array($x.rank).fill(-1);
+ } else if (typeof size === "number") {
+ size_ = [size, ...new Array($x.rank - 1).fill(-1)];
+ } else if (size.length < $x.rank) {
+ size_ = size.concat(new Array($x.rank - size.length).fill(-1));
+ } else {
+ size_ = size;
+ }
+ size_ = size_.map((d, i) => {
+ if (d >= 0) {
+ return d;
+ } else {
+ util["assert"](
+ d === -1,
+ () =>
+ `Negative size values should be exactly -1 but got ` +
+ `${d} for the slice() size at index ${i}.`
+ );
+ return $x.shape[i] - begin_[i];
+ }
+ });
+ assertParamsValid($x, begin_, size_);
+ const inputShape = $x.shape;
+ const grad = (dy) => {
+ // Create an Nx2 padding where the first column represents how many
+ // zeros are prepended (at start) for each dimension, and the second
+ // column indicates how many zeros are appended (at end).
+ // The number of zeros to append is the shape of the input
+ // elementwise-subtracted by both the begin vector and sizes vector.
+ const paddings = [];
+ for (let i = 0; i < dy.rank; i++) {
+ paddings.push([begin_[i], inputShape[i] - begin_[i] - size_[i]]);
+ }
+ return { x: () => pad_pad(dy, paddings) };
+ };
+ const attrs = { begin: begin_, size: size_ };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ (backend) => backend.slice($x, begin_, size_),
+ { x: $x },
+ grad,
+ "Slice",
+ attrs
+ );
+ }
+ const slice = Object(operation["a" /* op */])({ slice_ });
+ const slice1d = Object(operation["a" /* op */])({ slice1d_ });
+ const slice2d = Object(operation["a" /* op */])({ slice2d_ });
+ const slice3d = Object(operation["a" /* op */])({ slice3d_ });
+ const slice4d = Object(operation["a" /* op */])({ slice4d_ });
+ //# sourceMappingURL=slice.js.map
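+ // A short usage sketch for the slice ops wrapped above (tf.slice in the public API),
+ // showing the implicit begin/size filling described in the doc comment (illustrative
+ // only, not executed by this bundle):
+ //   const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ //   tf.slice(x, [0, 1]).print();          // size defaults to the rest: [[2, 3], [5, 6]]
+ //   tf.slice(x, [1, 0], [1, -1]).print(); // -1 takes the remainder:   [[4, 5, 6]]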
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Tile_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const tileGradConfig = {
+ kernelName: kernel_names["rb" /* Tile */],
+ inputsToSave: ["x"],
+ gradFunc: (dy, saved, attrs) => {
+ const [x] = saved;
+ const { reps } = attrs;
+ const derX = () => {
+ let xGrad = Object(tensor_ops["o" /* zerosLike */])(x);
+ // TODO(cais): Maybe reduce memory footprint by avoiding repeated
+ // slicing.
+ if (x.rank === 1) {
+ for (let i = 0; i < reps[0]; ++i) {
+ xGrad = add(xGrad, slice(dy, [i * x.shape[0]], [x.shape[0]]));
+ }
+ } else if (x.rank === 2) {
+ for (let i = 0; i < reps[0]; ++i) {
+ for (let j = 0; j < reps[1]; ++j) {
+ xGrad = add(
+ xGrad,
+ slice(dy, [i * x.shape[0], j * x.shape[1]], [x.shape[0], x.shape[1]])
+ );
+ }
+ }
+ } else if (x.rank === 3) {
+ for (let i = 0; i < reps[0]; ++i) {
+ for (let j = 0; j < reps[1]; ++j) {
+ for (let k = 0; k < reps[2]; ++k) {
+ xGrad = add(
+ xGrad,
+ slice(
+ dy,
+ [i * x.shape[0], j * x.shape[1], k * x.shape[2]],
+ [x.shape[0], x.shape[1], x.shape[2]]
+ )
+ );
+ }
+ }
+ }
+ } else if (x.rank === 4) {
+ for (let i = 0; i < reps[0]; ++i) {
+ for (let j = 0; j < reps[1]; ++j) {
+ for (let k = 0; k < reps[2]; ++k) {
+ for (let l = 0; l < reps[3]; ++l) {
+ xGrad = add(
+ xGrad,
+ slice(
+ dy,
+ [i * x.shape[0], j * x.shape[1], k * x.shape[2], l * x.shape[3]],
+ [x.shape[0], x.shape[1], x.shape[2], x.shape[3]]
+ )
+ );
+ }
+ }
+ }
+ }
+ } else {
+ throw new Error(
+ `Gradient for tile operation is not implemented for rank-` +
+ `${x.rank} tensors yet.`
+ );
+ }
+ return xGrad;
+ };
+ return { x: derX };
+ },
+ };
+ //# sourceMappingURL=Tile_grad.js.map
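+ // A hedged sketch of the tile gradient above: tiling copies the input, so the gradient
+ // sums the matching slices of dy back onto x (illustrative only):
+ //   const g = tf.grad((x) => tf.tile(x, [2]));
+ //   g(tf.tensor1d([1, 2])).print(); // [2, 2]: each element appears in two tiles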
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Transpose_grad.js
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const transposeGradConfig = {
+ kernelName: kernel_names["sb" /* Transpose */],
+ gradFunc: (dy, saved, attrs) => {
+ const transposeAttrs = attrs;
+ const { perm } = transposeAttrs;
+ const undoPerm = getUndoAxesPermutation(perm);
+ return { x: () => transpose(dy, undoPerm) };
+ },
+ };
+ //# sourceMappingURL=Transpose_grad.js.map
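+ // The transpose gradient above just applies the inverse permutation to dy; a minimal,
+ // illustrative check (not executed by this bundle):
+ //   const g = tf.grad((x) => tf.transpose(x, [1, 0]));
+ //   g(tf.tensor2d([[1, 2], [3, 4]])).print(); // all ones, back in the original [2, 2] shape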
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js
+ var kernel_registry = __webpack_require__(17);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/register_all_gradients.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ // Export all kernel configs here so that the package can auto register them
+ const gradConfigs = [
+ addGradConfig,
+ addNGradConfig,
+ atan2GradConfig,
+ avgPoolGradConfig,
+ avgPool3DGradConfig,
+ batchMatMulGradConfig,
+ batchToSpaceNDGradConfig,
+ broadcastToGradConfig,
+ concatGradConfig,
+ conv2DGradConfig,
+ conv2DBackpropInputGradConfig,
+ conv3DGradConfig,
+ cumsumGradConfig,
+ depthwiseConv2dNativeGradConfig,
+ divGradConfig,
+ eluGradConfig,
+ floorDivGradConfig,
+ fusedBatchNormGradConfig,
+ greaterEqualGradConfig,
+ identityGradConfig,
+ lrnGradConfig,
+ oneHotGradConfig,
+ padV2GradConfig,
+ splitVGradConfig,
+ maxGradConfig,
+ spaceToBatchNDGradConfig,
+ maximumGradConfig,
+ maxPoolGradConfig,
+ maxPool3DGradConfig,
+ minimumGradConfig,
+ modGradConfig,
+ multiplyGradConfig,
+ powGradConfig,
+ preluGradConfig,
+ reluGradConfig,
+ relu6GradConfig,
+ seluGradConfig,
+ squareGradConfig,
+ squaredDifferenceGradConfig,
+ tileGradConfig,
+ transposeGradConfig,
+ subGradConfig,
+ ];
+ for (const gradientConfig of gradConfigs) {
+ Object(kernel_registry["d" /* registerGradient */])(gradientConfig);
+ }
+ //# sourceMappingURL=register_all_gradients.js.map
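+ // A hedged sketch of how the same mechanism can register a custom gradient through the
+ // public tf.registerGradient API; the kernel name and gradient below are made up:
+ //   tf.registerGradient({
+ //     kernelName: 'MyCustomKernel',
+ //     gradFunc: (dy, saved) => ({ x: () => dy }),
+ //   });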
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/environment.js
+ var environment = __webpack_require__(10);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ class PlatformBrowser {
+ fetch(path, init) {
+ return fetch(path, init);
+ }
+ now() {
+ return performance.now();
+ }
+ encode(text, encoding) {
+ if (encoding !== "utf-8" && encoding !== "utf8") {
+ throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);
+ }
+ if (this.textEncoder == null) {
+ this.textEncoder = new TextEncoder();
+ }
+ return this.textEncoder.encode(text);
+ }
+ decode(bytes, encoding) {
+ return new TextDecoder(encoding).decode(bytes);
+ }
+ }
+ if (Object(environment["c" /* env */])().get("IS_BROWSER")) {
+ Object(environment["c" /* env */])().setPlatform("browser", new PlatformBrowser());
+ }
+ //# sourceMappingURL=platform_browser.js.map
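+ // The platform above defers to the browser's own primitives; for reference
+ // (illustrative, not executed here):
+ //   new TextEncoder().encode('abc');                               // Uint8Array [97, 98, 99]
+ //   new TextDecoder('utf-8').decode(new Uint8Array([97, 98, 99])); // 'abc'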
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js
+ var platform_node = __webpack_require__(62);
+
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js
+ var io_utils = __webpack_require__(13);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ class IORouterRegistry {
+ constructor() {
+ this.saveRouters = [];
+ this.loadRouters = [];
+ }
+ static getInstance() {
+ if (IORouterRegistry.instance == null) {
+ IORouterRegistry.instance = new IORouterRegistry();
+ }
+ return IORouterRegistry.instance;
+ }
+ /**
+ * Register a save-handler router.
+ *
+ * @param saveRouter A function that maps a URL-like string onto an instance
+ * of `IOHandler` with the `save` method defined or `null`.
+ */
+ static registerSaveRouter(saveRouter) {
+ IORouterRegistry.getInstance().saveRouters.push(saveRouter);
+ }
+ /**
+ * Register a load-handler router.
+ *
+ * @param loadRouter A function that maps a URL-like string onto an instance
+ * of `IOHandler` with the `load` method defined or `null`.
+ */
+ static registerLoadRouter(loadRouter) {
+ IORouterRegistry.getInstance().loadRouters.push(loadRouter);
+ }
+ /**
+ * Look up IOHandler for saving, given a URL-like string.
+ *
+ * @param url
+ * @returns All valid handlers for `url` with the `save` method defined,
+ * given the currently registered handler routers.
+ */
+ static getSaveHandlers(url) {
+ return IORouterRegistry.getHandlers(url, "save");
+ }
+ /**
+ * Look up IOHandler for loading, given a URL-like string.
+ *
+ * @param url
+ * @param loadOptions Optional, custom load options.
+ * @returns All valid handlers for `url`, given the currently registered
+ * handler routers.
+ */
+ static getLoadHandlers(url, loadOptions) {
+ return IORouterRegistry.getHandlers(url, "load", loadOptions);
+ }
+ static getHandlers(url, handlerType, loadOptions) {
+ const validHandlers = [];
+ const routers =
+ handlerType === "load"
+ ? IORouterRegistry.getInstance().loadRouters
+ : IORouterRegistry.getInstance().saveRouters;
+ routers.forEach((router) => {
+ const handler = router(url, loadOptions);
+ if (handler !== null) {
+ validHandlers.push(handler);
+ }
+ });
+ return validHandlers;
+ }
+ }
+ const registerSaveRouter = (saveRouter) => IORouterRegistry.registerSaveRouter(saveRouter);
+ const registerLoadRouter = (loadRouter) => IORouterRegistry.registerLoadRouter(loadRouter);
+ const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);
+ const getLoadHandlers = (url, loadOptions) =>
+ IORouterRegistry.getLoadHandlers(url, loadOptions);
+ //# sourceMappingURL=router_registry.js.map
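+ // A hedged sketch of the router pattern above: a router inspects a URL-like string and
+ // returns either an IOHandler or null. The scheme and handler below are hypothetical:
+ //   registerSaveRouter((url) =>
+ //     typeof url === "string" && url.startsWith("myscheme://") ? myHypotheticalHandler : null
+ //   );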
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/model_management.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Classes and functions for model management across multiple storage mediums.
+ *
+ * Supported client actions:
+ * - Listing models on all registered storage mediums.
+ * - Remove model by URL from any registered storage mediums, by using URL
+ * string.
+ * - Moving or copying model from one path to another in the same medium or from
+ * one medium to another, by using URL strings.
+ */
+
+ const URL_SCHEME_SUFFIX = "://";
+ class model_management_ModelStoreManagerRegistry {
+ constructor() {
+ this.managers = {};
+ }
+ static getInstance() {
+ if (model_management_ModelStoreManagerRegistry.instance == null) {
+ model_management_ModelStoreManagerRegistry.instance =
+ new model_management_ModelStoreManagerRegistry();
+ }
+ return model_management_ModelStoreManagerRegistry.instance;
+ }
+ /**
+ * Register a model store manager for a URL scheme.
+ *
+ * @param scheme The URL scheme, e.g., 'localstorage', 'indexeddb'.
+ * @param manager An instance of `ModelStoreManager` that handles listing and
+ * removing models stored under that scheme.
+ */
+ static registerManager(scheme, manager) {
+ Object(util["assert"])(scheme != null, () => "scheme must not be undefined or null.");
+ if (scheme.endsWith(URL_SCHEME_SUFFIX)) {
+ scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));
+ }
+ Object(util["assert"])(scheme.length > 0, () => "scheme must not be an empty string.");
+ const registry = model_management_ModelStoreManagerRegistry.getInstance();
+ Object(util["assert"])(
+ registry.managers[scheme] == null,
+ () => `A model store manager is already registered for scheme '${scheme}'.`
+ );
+ registry.managers[scheme] = manager;
+ }
+ static getManager(scheme) {
+ const manager = this.getInstance().managers[scheme];
+ if (manager == null) {
+ throw new Error(`Cannot find model manager for scheme '${scheme}'`);
+ }
+ return manager;
+ }
+ static getSchemes() {
+ return Object.keys(this.getInstance().managers);
+ }
+ }
+ /**
+ * Helper method for parsing a URL string into a scheme and a path.
+ *
+ * @param url E.g., 'localstorage://my-model'
+ * @returns A dictionary with two fields: scheme and path.
+ * Scheme: e.g., 'localstorage' in the example above.
+ * Path: e.g., 'my-model' in the example above.
+ */
+ function parseURL(url) {
+ if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {
+ throw new Error(
+ `The url string provided does not contain a scheme. ` +
+ `Supported schemes are: ` +
+ `${model_management_ModelStoreManagerRegistry.getSchemes().join(",")}`
+ );
+ }
+ return {
+ scheme: url.split(URL_SCHEME_SUFFIX)[0],
+ path: url.split(URL_SCHEME_SUFFIX)[1],
+ };
+ }
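+ // Illustrative trace of parseURL above (not executed by this bundle):
+ //   parseURL('localstorage://my/model/1') -> { scheme: 'localstorage', path: 'my/model/1' }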
+ async function cloneModelInternal(sourceURL, destURL, deleteSource = false) {
+ Object(util["assert"])(
+ sourceURL !== destURL,
+ () => `Old path and new path are the same: '${sourceURL}'`
+ );
+ const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);
+ Object(util["assert"])(
+ loadHandlers.length > 0,
+ () => `Copying failed because no load handler is found for source URL ${sourceURL}.`
+ );
+ Object(util["assert"])(
+ loadHandlers.length < 2,
+ () =>
+ `Copying failed because more than one (${loadHandlers.length}) ` +
+ `load handlers for source URL ${sourceURL}.`
+ );
+ const loadHandler = loadHandlers[0];
+ const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);
+ Object(util["assert"])(
+ saveHandlers.length > 0,
+ () =>
+ `Copying failed because no save handler is found for destination ` + `URL ${destURL}.`
+ );
+ Object(util["assert"])(
+ saveHandlers.length < 2,
+ () =>
+ `Copying failed because more than one (${saveHandlers.length}) ` +
+ `save handlers for destination URL ${destURL}.`
+ );
+ const saveHandler = saveHandlers[0];
+ const sourceScheme = parseURL(sourceURL).scheme;
+ const sourcePath = parseURL(sourceURL).path;
+ const sameMedium = sourceScheme === parseURL(destURL).scheme;
+ const modelArtifacts = await loadHandler.load();
+ // If moving within the same storage medium, remove the old model as soon as
+ // the loading is done. Without doing this, it is possible that the combined
+ // size of the two models will cause the cloning to fail.
+ if (deleteSource && sameMedium) {
+ await model_management_ModelStoreManagerRegistry
+ .getManager(sourceScheme)
+ .removeModel(sourcePath);
+ }
+ const saveResult = await saveHandler.save(modelArtifacts);
+ // If moving between mediums, the deletion is done after the save succeeds.
+ // This guards against the case in which saving to the destination medium
+ // fails.
+ if (deleteSource && !sameMedium) {
+ await model_management_ModelStoreManagerRegistry
+ .getManager(sourceScheme)
+ .removeModel(sourcePath);
+ }
+ return saveResult.modelArtifactsInfo;
+ }
+ /**
+ * List all models stored in registered storage mediums.
+ *
+ * For a web browser environment, the registered mediums are Local Storage and
+ * IndexedDB.
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Delete the model.
+ * await tf.io.removeModel('localstorage://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ * ```
+ *
+ * @returns A `Promise` of a dictionary mapping URLs of existing models to
+ * their model artifacts info. URLs include medium-specific schemes, e.g.,
+ * 'indexeddb://my/model/1'. Model artifacts info include type of the
+ * model's topology, byte sizes of the topology, weights, etc.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ async function listModels() {
+ const schemes = model_management_ModelStoreManagerRegistry.getSchemes();
+ const out = {};
+ for (const scheme of schemes) {
+ const schemeOut = await model_management_ModelStoreManagerRegistry
+ .getManager(scheme)
+ .listModels();
+ for (const path in schemeOut) {
+ const url = scheme + URL_SCHEME_SUFFIX + path;
+ out[url] = schemeOut[path];
+ }
+ }
+ return out;
+ }
+ /**
+ * Remove a model specified by URL from a registered storage medium.
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Delete the model.
+ * await tf.io.removeModel('localstorage://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ * ```
+ *
+ * @param url A URL to a stored model, with a scheme prefix, e.g.,
+ * 'localstorage://my-model-1', 'indexeddb://my/model/2'.
+ * @returns ModelArtifactsInfo of the deleted model (if and only if deletion
+ * is successful).
+ * @throws Error if deletion fails, e.g., if no model exists at `path`.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ async function removeModel(url) {
+ const schemeAndPath = parseURL(url);
+ const manager = model_management_ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);
+ return manager.removeModel(schemeAndPath.path);
+ }
+ /**
+ * Copy a model from one URL to another.
+ *
+ * This function supports:
+ *
+ * 1. Copying within a storage medium, e.g.,
+ * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`
+ * 2. Copying between two storage mediums, e.g.,
+ * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Copy the model, from Local Storage to IndexedDB.
+ * await tf.io.copyModel(
+ * 'localstorage://demo/management/model1',
+ * 'indexeddb://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Remove both models.
+ * await tf.io.removeModel('localstorage://demo/management/model1');
+ * await tf.io.removeModel('indexeddb://demo/management/model1');
+ * ```
+ *
+ * @param sourceURL Source URL of copying.
+ * @param destURL Destination URL of copying.
+ * @returns ModelArtifactsInfo of the copied model (if and only if copying
+ * is successful).
+ * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or
+ * if `oldPath` and `newPath` are identical.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ async function copyModel(sourceURL, destURL) {
+ const deleteSource = false;
+ return cloneModelInternal(sourceURL, destURL, deleteSource);
+ }
+ /**
+ * Move a model from one URL to another.
+ *
+ * This function supports:
+ *
+ * 1. Moving within a storage medium, e.g.,
+ * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`
+ * 2. Moving between two storage mediums, e.g.,
+ * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Move the model, from Local Storage to IndexedDB.
+ * await tf.io.moveModel(
+ * 'localstorage://demo/management/model1',
+ * 'indexeddb://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Remove the moved model.
+ * await tf.io.removeModel('indexeddb://demo/management/model1');
+ * ```
+ *
+ * @param sourceURL Source URL of moving.
+ * @param destURL Destination URL of moving.
+ * @returns ModelArtifactsInfo of the moved model (if and only if moving
+ * is successful).
+ * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or
+ * if `oldPath` and `newPath` are identical.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ async function moveModel(sourceURL, destURL) {
+ const deleteSource = true;
+ return cloneModelInternal(sourceURL, destURL, deleteSource);
+ }
+
+ //# sourceMappingURL=model_management.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const DATABASE_NAME = "tensorflowjs";
+ const DATABASE_VERSION = 1;
+ // Model data and ModelArtifactsInfo (metadata) are stored in two separate
+ // stores for efficient access of the list of stored models and their metadata.
+ // 1. The object store for model data: topology, weights and weight manifests.
+ const MODEL_STORE_NAME = "models_store";
+ // 2. The object store for ModelArtifactsInfo, including meta-information such
+ // as the type of topology (JSON vs binary), byte size of the topology, byte
+ // size of the weights, etc.
+ const INFO_STORE_NAME = "model_info_store";
+ /**
+ * Delete the entire database for tensorflow.js, including the models store.
+ */
+ async function deleteDatabase() {
+ const idbFactory = getIndexedDBFactory();
+ return new Promise((resolve, reject) => {
+ const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);
+ deleteRequest.onsuccess = () => resolve();
+ deleteRequest.onerror = (error) => reject(error);
+ });
+ }
+ function getIndexedDBFactory() {
+ if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ // TODO(cais): Add more info about what IOHandler subtypes are available.
+ // Maybe point to a doc page on the web and/or automatically determine
+ // the available IOHandlers and print them in the error message.
+ throw new Error(
+ "Failed to obtain IndexedDB factory because the current environment" +
+ "is not a web browser."
+ );
+ }
+ // tslint:disable-next-line:no-any
+ const theWindow = typeof window === "undefined" ? self : window;
+ const factory =
+ theWindow.indexedDB ||
+ theWindow.mozIndexedDB ||
+ theWindow.webkitIndexedDB ||
+ theWindow.msIndexedDB ||
+ theWindow.shimIndexedDB;
+ if (factory == null) {
+ throw new Error("The current browser does not appear to support IndexedDB.");
+ }
+ return factory;
+ }
+ function setUpDatabase(openRequest) {
+ const db = openRequest.result;
+ db.createObjectStore(MODEL_STORE_NAME, { keyPath: "modelPath" });
+ db.createObjectStore(INFO_STORE_NAME, { keyPath: "modelPath" });
+ }
+ /**
+ * IOHandler subclass: Browser IndexedDB.
+ *
+ * See the doc string of `browserIndexedDB` for more details.
+ */
+ class indexed_db_BrowserIndexedDB {
+ constructor(modelPath) {
+ this.indexedDB = getIndexedDBFactory();
+ if (modelPath == null || !modelPath) {
+ throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");
+ }
+ this.modelPath = modelPath;
+ }
+ async save(modelArtifacts) {
+ // TODO(cais): Support saving GraphDef models.
+ if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
+ throw new Error(
+ "BrowserLocalStorage.save() does not support saving model topology " +
+ "in binary formats yet."
+ );
+ }
+ return this.databaseAction(this.modelPath, modelArtifacts);
+ }
+ async load() {
+ return this.databaseAction(this.modelPath);
+ }
+ /**
+ * Perform database action to put model artifacts into or read model artifacts
+ * from IndexedDB object store.
+ *
+ * Whether the action is put or get depends on whether `modelArtifacts` is
+ * specified. If it is specified, the action will be put; otherwise the action
+ * will be get.
+ *
+ * @param modelPath A unique string path for the model.
+ * @param modelArtifacts If specified, it will be the model artifacts to be
+ * stored in IndexedDB.
+ * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`
+ * of `ModelArtifacts`, if the action is get.
+ */
+ databaseAction(modelPath, modelArtifacts) {
+ return new Promise((resolve, reject) => {
+ const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
+ openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
+ openRequest.onsuccess = () => {
+ const db = openRequest.result;
+ if (modelArtifacts == null) {
+ // Read model out from object store.
+ const modelTx = db.transaction(MODEL_STORE_NAME, "readonly");
+ const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
+ const getRequest = modelStore.get(this.modelPath);
+ getRequest.onsuccess = () => {
+ if (getRequest.result == null) {
+ db.close();
+ return reject(
+ new Error(
+ `Cannot find model with path '${this.modelPath}' ` + `in IndexedDB.`
+ )
+ );
+ } else {
+ resolve(getRequest.result.modelArtifacts);
+ }
+ };
+ getRequest.onerror = (error) => {
+ db.close();
+ return reject(getRequest.error);
+ };
+ modelTx.oncomplete = () => db.close();
+ } else {
+ // Put model into object store.
+ const modelArtifactsInfo = Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
+ modelArtifacts
+ );
+ // First, put ModelArtifactsInfo into info store.
+ const infoTx = db.transaction(INFO_STORE_NAME, "readwrite");
+ let infoStore = infoTx.objectStore(INFO_STORE_NAME);
+ const putInfoRequest = infoStore.put({
+ modelPath: this.modelPath,
+ modelArtifactsInfo,
+ });
+ let modelTx;
+ putInfoRequest.onsuccess = () => {
+ // Second, put model data into model store.
+ modelTx = db.transaction(MODEL_STORE_NAME, "readwrite");
+ const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
+ const putModelRequest = modelStore.put({
+ modelPath: this.modelPath,
+ modelArtifacts,
+ modelArtifactsInfo,
+ });
+ putModelRequest.onsuccess = () => resolve({ modelArtifactsInfo });
+ putModelRequest.onerror = (error) => {
+ // If the put-model request fails, roll back the info entry as
+ // well.
+ infoStore = infoTx.objectStore(INFO_STORE_NAME);
+ const deleteInfoRequest = infoStore.delete(this.modelPath);
+ deleteInfoRequest.onsuccess = () => {
+ db.close();
+ return reject(putModelRequest.error);
+ };
+ deleteInfoRequest.onerror = (error) => {
+ db.close();
+ return reject(putModelRequest.error);
+ };
+ };
+ };
+ putInfoRequest.onerror = (error) => {
+ db.close();
+ return reject(putInfoRequest.error);
+ };
+ infoTx.oncomplete = () => {
+ if (modelTx == null) {
+ db.close();
+ } else {
+ modelTx.oncomplete = () => db.close();
+ }
+ };
+ }
+ };
+ openRequest.onerror = (error) => reject(openRequest.error);
+ });
+ }
+ }
+ indexed_db_BrowserIndexedDB.URL_SCHEME = "indexeddb://";
+ const indexedDBRouter = (url) => {
+ if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ return null;
+ } else {
+ if (!Array.isArray(url) && url.startsWith(indexed_db_BrowserIndexedDB.URL_SCHEME)) {
+ return browserIndexedDB(url.slice(indexed_db_BrowserIndexedDB.URL_SCHEME.length));
+ } else {
+ return null;
+ }
+ }
+ };
+ IORouterRegistry.registerSaveRouter(indexedDBRouter);
+ IORouterRegistry.registerLoadRouter(indexedDBRouter);
+ /**
+ * Creates a browser IndexedDB IOHandler for saving and loading models.
+ *
+ * ```js
+ * const model = tf.sequential();
+ * model.add(
+ * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
+ *
+ * const saveResult = await model.save('indexeddb://MyModel');
+ * console.log(saveResult);
+ * ```
+ *
+ * @param modelPath A unique identifier for the model to be saved. Must be a
+ * non-empty string.
+ * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),
+ * which can be used with, e.g., `tf.Model.save`.
+ */
+ function browserIndexedDB(modelPath) {
+ return new indexed_db_BrowserIndexedDB(modelPath);
+ }
+ function maybeStripScheme(key) {
+ return key.startsWith(indexed_db_BrowserIndexedDB.URL_SCHEME)
+ ? key.slice(indexed_db_BrowserIndexedDB.URL_SCHEME.length)
+ : key;
+ }
+ class BrowserIndexedDBManager {
+ constructor() {
+ this.indexedDB = getIndexedDBFactory();
+ }
+ async listModels() {
+ return new Promise((resolve, reject) => {
+ const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
+ openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
+ openRequest.onsuccess = () => {
+ const db = openRequest.result;
+ const tx = db.transaction(INFO_STORE_NAME, "readonly");
+ const store = tx.objectStore(INFO_STORE_NAME);
+ // tslint:disable:max-line-length
+ // Need to cast `store` as `any` here because TypeScript's DOM
+ // library does not have the `getAll()` method even though the
+ // method is supported in the latest version of most mainstream
+ // browsers:
+ // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll
+ // tslint:enable:max-line-length
+ // tslint:disable-next-line:no-any
+ const getAllInfoRequest = store.getAll();
+ getAllInfoRequest.onsuccess = () => {
+ const out = {};
+ for (const item of getAllInfoRequest.result) {
+ out[item.modelPath] = item.modelArtifactsInfo;
+ }
+ resolve(out);
+ };
+ getAllInfoRequest.onerror = (error) => {
+ db.close();
+ return reject(getAllInfoRequest.error);
+ };
+ tx.oncomplete = () => db.close();
+ };
+ openRequest.onerror = (error) => reject(openRequest.error);
+ });
+ }
+ async removeModel(path) {
+ path = maybeStripScheme(path);
+ return new Promise((resolve, reject) => {
+ const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
+ openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
+ openRequest.onsuccess = () => {
+ const db = openRequest.result;
+ const infoTx = db.transaction(INFO_STORE_NAME, "readwrite");
+ const infoStore = infoTx.objectStore(INFO_STORE_NAME);
+ const getInfoRequest = infoStore.get(path);
+ let modelTx;
+ getInfoRequest.onsuccess = () => {
+ if (getInfoRequest.result == null) {
+ db.close();
+ return reject(
+ new Error(`Cannot find model with path '${path}' ` + `in IndexedDB.`)
+ );
+ } else {
+ // First, delete the entry in the info store.
+ const deleteInfoRequest = infoStore.delete(path);
+ const deleteModelData = () => {
+ // Second, delete the entry in the model store.
+ modelTx = db.transaction(MODEL_STORE_NAME, "readwrite");
+ const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
+ const deleteModelRequest = modelStore.delete(path);
+ deleteModelRequest.onsuccess = () =>
+ resolve(getInfoRequest.result.modelArtifactsInfo);
+ deleteModelRequest.onerror = (error) => reject(getInfoRequest.error);
+ };
+ // Proceed with deleting model data regardless of whether deletion
+ // of info data succeeds or not.
+ deleteInfoRequest.onsuccess = deleteModelData;
+ deleteInfoRequest.onerror = (error) => {
+ deleteModelData();
+ db.close();
+ return reject(getInfoRequest.error);
+ };
+ }
+ };
+ getInfoRequest.onerror = (error) => {
+ db.close();
+ return reject(getInfoRequest.error);
+ };
+ infoTx.oncomplete = () => {
+ if (modelTx == null) {
+ db.close();
+ } else {
+ modelTx.oncomplete = () => db.close();
+ }
+ };
+ };
+ openRequest.onerror = (error) => reject(openRequest.error);
+ });
+ }
+ }
+ if (Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ // Wrap the construction and registration, to guard against browsers that
+ // don't support IndexedDB.
+ try {
+ model_management_ModelStoreManagerRegistry.registerManager(
+ indexed_db_BrowserIndexedDB.URL_SCHEME,
+ new BrowserIndexedDBManager()
+ );
+ } catch (err) {}
+ }
+ //# sourceMappingURL=indexed_db.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ const PATH_SEPARATOR = "/";
+ const PATH_PREFIX = "tensorflowjs_models";
+ const INFO_SUFFIX = "info";
+ const MODEL_TOPOLOGY_SUFFIX = "model_topology";
+ const WEIGHT_SPECS_SUFFIX = "weight_specs";
+ const WEIGHT_DATA_SUFFIX = "weight_data";
+ const MODEL_METADATA_SUFFIX = "model_metadata";
+ /**
+ * Purge all tensorflow.js-saved model artifacts from local storage.
+ *
+ * @returns Paths of the models purged.
+ */
+ function purgeLocalStorageArtifacts() {
+ if (
+ !Object(environment["c" /* env */])().getBool("IS_BROWSER") ||
+ typeof window === "undefined" ||
+ typeof window.localStorage === "undefined"
+ ) {
+ throw new Error(
+ "purgeLocalStorageModels() cannot proceed because local storage is " +
+ "unavailable in the current environment."
+ );
+ }
+ const LS = window.localStorage;
+ const purgedModelPaths = [];
+ for (let i = 0; i < LS.length; ++i) {
+ const key = LS.key(i);
+ const prefix = PATH_PREFIX + PATH_SEPARATOR;
+ if (key.startsWith(prefix) && key.length > prefix.length) {
+ LS.removeItem(key);
+ const modelName = getModelPathFromKey(key);
+ if (purgedModelPaths.indexOf(modelName) === -1) {
+ purgedModelPaths.push(modelName);
+ }
+ }
+ }
+ return purgedModelPaths;
+ }
+ function getModelKeys(path) {
+ return {
+ info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),
+ topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),
+ weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),
+ weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),
+ modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR),
+ };
+ }
+ /**
+ * Get model path from a local-storage key.
+ *
+ * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'
+ *
+ * @param key
+ */
+ function getModelPathFromKey(key) {
+ const items = key.split(PATH_SEPARATOR);
+ if (items.length < 3) {
+ throw new Error(`Invalid key format: ${key}`);
+ }
+ return items.slice(1, items.length - 1).join(PATH_SEPARATOR);
+ }
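+ // Illustrative traces of the key helpers above (not executed by this bundle):
+ //   getModelKeys('my/model/1').info -> 'tensorflowjs_models/my/model/1/info'
+ //   getModelPathFromKey('tensorflowjs_models/my/model/1/info') -> 'my/model/1'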
+ function local_storage_maybeStripScheme(key) {
+ return key.startsWith(local_storage_BrowserLocalStorage.URL_SCHEME)
+ ? key.slice(local_storage_BrowserLocalStorage.URL_SCHEME.length)
+ : key;
+ }
+ /**
+ * IOHandler subclass: Browser Local Storage.
+ *
+ * See the doc string to `browserLocalStorage` for more details.
+ */
+ class local_storage_BrowserLocalStorage {
+ constructor(modelPath) {
+ if (
+ !Object(environment["c" /* env */])().getBool("IS_BROWSER") ||
+ typeof window === "undefined" ||
+ typeof window.localStorage === "undefined"
+ ) {
+ // TODO(cais): Add more info about what IOHandler subtypes are
+ // available.
+ // Maybe point to a doc page on the web and/or automatically determine
+ // the available IOHandlers and print them in the error message.
+ throw new Error("The current environment does not support local storage.");
+ }
+ this.LS = window.localStorage;
+ if (modelPath == null || !modelPath) {
+ throw new Error("For local storage, modelPath must not be null, undefined or empty.");
+ }
+ this.modelPath = modelPath;
+ this.keys = getModelKeys(this.modelPath);
+ }
+ /**
+ * Save model artifacts to browser local storage.
+ *
+ * See the documentation to `browserLocalStorage` for details on the saved
+ * artifacts.
+ *
+ * @param modelArtifacts The model artifacts to be stored.
+ * @returns An instance of SaveResult.
+ */
+ async save(modelArtifacts) {
+ if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
+ throw new Error(
+ "BrowserLocalStorage.save() does not support saving model topology " +
+ "in binary formats yet."
+ );
+ } else {
+ const topology = JSON.stringify(modelArtifacts.modelTopology);
+ const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);
+ const modelArtifactsInfo = Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
+ modelArtifacts
+ );
+ try {
+ this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));
+ this.LS.setItem(this.keys.topology, topology);
+ this.LS.setItem(this.keys.weightSpecs, weightSpecs);
+ this.LS.setItem(
+ this.keys.weightData,
+ Object(io_utils["a" /* arrayBufferToBase64String */])(modelArtifacts.weightData)
+ );
+ this.LS.setItem(
+ this.keys.modelMetadata,
+ JSON.stringify({
+ format: modelArtifacts.format,
+ generatedBy: modelArtifacts.generatedBy,
+ convertedBy: modelArtifacts.convertedBy,
+ userDefinedMetadata: modelArtifacts.userDefinedMetadata,
+ })
+ );
+ return { modelArtifactsInfo };
+ } catch (err) {
+ // If saving failed, clean up all items saved so far.
+ this.LS.removeItem(this.keys.info);
+ this.LS.removeItem(this.keys.topology);
+ this.LS.removeItem(this.keys.weightSpecs);
+ this.LS.removeItem(this.keys.weightData);
+ this.LS.removeItem(this.keys.modelMetadata);
+ throw new Error(
+ `Failed to save model '${this.modelPath}' to local storage: ` +
+ `size quota being exceeded is a possible cause of this failure: ` +
+ `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +
+ `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +
+ `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`
+ );
+ }
+ }
+ }
+ /**
+ * Load a model from local storage.
+ *
+ * See the documentation to `browserLocalStorage` for details on the saved
+ * artifacts.
+ *
+ * @returns The loaded model (if loading succeeds).
+ */
+ async load() {
+ const info = JSON.parse(this.LS.getItem(this.keys.info));
+ if (info == null) {
+ throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);
+ }
+ if (info.modelTopologyType !== "JSON") {
+ throw new Error(
+ "BrowserLocalStorage does not support loading non-JSON model " + "topology yet."
+ );
+ }
+ const out = {};
+ // Load topology.
+ const topology = JSON.parse(this.LS.getItem(this.keys.topology));
+ if (topology == null) {
+ throw new Error(
+ `In local storage, the topology of model '${this.modelPath}' ` + `is missing.`
+ );
+ }
+ out.modelTopology = topology;
+ // Load weight specs.
+ const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));
+ if (weightSpecs == null) {
+ throw new Error(
+ `In local storage, the weight specs of model '${this.modelPath}' ` + `are missing.`
+ );
+ }
+ out.weightSpecs = weightSpecs;
+ // Load meta-data fields.
+ const metadataString = this.LS.getItem(this.keys.modelMetadata);
+ if (metadataString != null) {
+ const metadata = JSON.parse(metadataString);
+ out.format = metadata["format"];
+ out.generatedBy = metadata["generatedBy"];
+ out.convertedBy = metadata["convertedBy"];
+ out.userDefinedMetadata = metadata["userDefinedMetadata"];
+ }
+ // Load weight data.
+ const weightDataBase64 = this.LS.getItem(this.keys.weightData);
+ if (weightDataBase64 == null) {
+ throw new Error(
+ `In local storage, the binary weight values of model ` +
+ `'${this.modelPath}' are missing.`
+ );
+ }
+ out.weightData = Object(io_utils["b" /* base64StringToArrayBuffer */])(weightDataBase64);
+ return out;
+ }
+ }
+ local_storage_BrowserLocalStorage.URL_SCHEME = "localstorage://";
+ const localStorageRouter = (url) => {
+ if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ return null;
+ } else {
+ if (!Array.isArray(url) && url.startsWith(local_storage_BrowserLocalStorage.URL_SCHEME)) {
+ return browserLocalStorage(
+ url.slice(local_storage_BrowserLocalStorage.URL_SCHEME.length)
+ );
+ } else {
+ return null;
+ }
+ }
+ };
+ IORouterRegistry.registerSaveRouter(localStorageRouter);
+ IORouterRegistry.registerLoadRouter(localStorageRouter);
+ /**
+ * Factory function for local storage IOHandler.
+ *
+ * This `IOHandler` supports both `save` and `load`.
+ *
+ * For each model's saved artifacts, five items are saved to local storage.
+ * - `tensorflowjs_models/${modelPath}/info`: Contains meta-info about the
+ * model, such as date saved, type of the topology, size in bytes, etc.
+ * - `tensorflowjs_models/${modelPath}/model_topology`: Model topology. For
+ * Keras-style models, this is a stringified JSON.
+ * - `tensorflowjs_models/${modelPath}/weight_specs`: Weight specs of the
+ * model, can be used to decode the saved binary weight values (see
+ * item below).
+ * - `tensorflowjs_models/${modelPath}/weight_data`: Concatenated binary
+ * weight values, stored as a base64-encoded string.
+ * - `tensorflowjs_models/${modelPath}/model_metadata`: Model metadata such
+ * as the format, generatedBy, and convertedBy fields.
+ *
+ * Saving may throw an `Error` if the total size of the artifacts exceed the
+ * browser-specific quota.
+ *
+ * @param modelPath A unique identifier for the model to be saved. Must be a
+ * non-empty string.
+ * @returns An instance of `IOHandler`, which can be used with, e.g.,
+ * `tf.Model.save`.
+ */
+ function browserLocalStorage(modelPath) {
+ return new local_storage_BrowserLocalStorage(modelPath);
+ }
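+ /**
+ * Illustrative usage (editor's sketch, not part of the bundled library): the
+ * 'localstorage://' URL scheme registered above routes saves and loads
+ * through this handler. The model name 'my-model' is a hypothetical example.
+ *
+ * ```js
+ * // Save a Layers model to local storage, then load it back.
+ * const saveResult = await model.save('localstorage://my-model');
+ * const loaded = await tf.loadLayersModel('localstorage://my-model');
+ * ```
+ */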
+ class local_storage_BrowserLocalStorageManager {
+ constructor() {
+ Object(util["assert"])(
+ Object(environment["c" /* env */])().getBool("IS_BROWSER"),
+ () => "Current environment is not a web browser"
+ );
+ Object(util["assert"])(
+ typeof window === "undefined" || typeof window.localStorage !== "undefined",
+ () => "Current browser does not appear to support localStorage"
+ );
+ this.LS = window.localStorage;
+ }
+ async listModels() {
+ const out = {};
+ const prefix = PATH_PREFIX + PATH_SEPARATOR;
+ const suffix = PATH_SEPARATOR + INFO_SUFFIX;
+ for (let i = 0; i < this.LS.length; ++i) {
+ const key = this.LS.key(i);
+ if (key.startsWith(prefix) && key.endsWith(suffix)) {
+ const modelPath = getModelPathFromKey(key);
+ out[modelPath] = JSON.parse(this.LS.getItem(key));
+ }
+ }
+ return out;
+ }
+ async removeModel(path) {
+ path = local_storage_maybeStripScheme(path);
+ const keys = getModelKeys(path);
+ if (this.LS.getItem(keys.info) == null) {
+ throw new Error(`Cannot find model at path '${path}'`);
+ }
+ const info = JSON.parse(this.LS.getItem(keys.info));
+ this.LS.removeItem(keys.info);
+ this.LS.removeItem(keys.topology);
+ this.LS.removeItem(keys.weightSpecs);
+ this.LS.removeItem(keys.weightData);
+ return info;
+ }
+ }
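+ /**
+ * Illustrative usage (editor's sketch): the manager registered below backs
+ * the model-management helpers for the 'localstorage://' scheme, e.g.
+ * listing and removing stored models. The model name 'my-model' is a
+ * hypothetical example.
+ *
+ * ```js
+ * const models = await tf.io.listModels();
+ * console.log(models); // keyed by 'localstorage://<modelPath>'
+ * await tf.io.removeModel('localstorage://my-model');
+ * ```
+ */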
+ if (Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ // Wrap the construction and registration, to guard against browsers that
+ // don't support Local Storage.
+ try {
+ model_management_ModelStoreManagerRegistry.registerManager(
+ local_storage_BrowserLocalStorage.URL_SCHEME,
+ new local_storage_BrowserLocalStorageManager()
+ );
+ } catch (err) {}
+ }
+ //# sourceMappingURL=local_storage.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * IOHandlers related to files, such as browser-triggered file downloads,
+ * user-selected files in browser.
+ */
+
+ const DEFAULT_FILE_NAME_PREFIX = "model";
+ const DEFAULT_JSON_EXTENSION_NAME = ".json";
+ const DEFAULT_WEIGHT_DATA_EXTENSION_NAME = ".weights.bin";
+ function defer(f) {
+ return new Promise((resolve) => setTimeout(resolve)).then(f);
+ }
+ class browser_files_BrowserDownloads {
+ constructor(fileNamePrefix) {
+ if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ // TODO(cais): Provide info on what IOHandlers are available under the
+ // current environment.
+ throw new Error(
+ "browserDownloads() cannot proceed because the current environment " +
+ "is not a browser."
+ );
+ }
+ if (fileNamePrefix.startsWith(browser_files_BrowserDownloads.URL_SCHEME)) {
+ fileNamePrefix = fileNamePrefix.slice(browser_files_BrowserDownloads.URL_SCHEME.length);
+ }
+ if (fileNamePrefix == null || fileNamePrefix.length === 0) {
+ fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;
+ }
+ this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;
+ this.weightDataFileName = fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;
+ }
+ async save(modelArtifacts) {
+ if (typeof document === "undefined") {
+ throw new Error(
+ "Browser downloads are not supported in " +
+ "this environment since `document` is not present"
+ );
+ }
+ const weightsURL = window.URL.createObjectURL(
+ new Blob([modelArtifacts.weightData], { type: "application/octet-stream" })
+ );
+ if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
+ throw new Error(
+ "BrowserDownloads.save() does not support saving model topology " +
+ "in binary formats yet."
+ );
+ } else {
+ const weightsManifest = [
+ {
+ paths: ["./" + this.weightDataFileName],
+ weights: modelArtifacts.weightSpecs,
+ },
+ ];
+ const modelTopologyAndWeightManifest = {
+ modelTopology: modelArtifacts.modelTopology,
+ format: modelArtifacts.format,
+ generatedBy: modelArtifacts.generatedBy,
+ convertedBy: modelArtifacts.convertedBy,
+ weightsManifest,
+ };
+ const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(
+ new Blob([JSON.stringify(modelTopologyAndWeightManifest)], {
+ type: "application/json",
+ })
+ );
+ // If anchor elements are not provided, create them without attaching them
+ // to parents, so that the downloaded file names can be controlled.
+ const jsonAnchor =
+ this.jsonAnchor == null ? document.createElement("a") : this.jsonAnchor;
+ jsonAnchor.download = this.modelTopologyFileName;
+ jsonAnchor.href = modelTopologyAndWeightManifestURL;
+ // Trigger downloads by dispatching a click event on the download anchors.
+ // When multiple downloads are started synchronously, Firefox will only
+ // save the last one.
+ await defer(() => jsonAnchor.dispatchEvent(new MouseEvent("click")));
+ if (modelArtifacts.weightData != null) {
+ const weightDataAnchor =
+ this.weightDataAnchor == null ? document.createElement("a") : this.weightDataAnchor;
+ weightDataAnchor.download = this.weightDataFileName;
+ weightDataAnchor.href = weightsURL;
+ await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent("click")));
+ }
+ return {
+ modelArtifactsInfo: Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
+ modelArtifacts
+ ),
+ };
+ }
+ }
+ }
+ browser_files_BrowserDownloads.URL_SCHEME = "downloads://";
+ class browser_files_BrowserFiles {
+ constructor(files) {
+ if (files == null || files.length < 1) {
+ throw new Error(
+ `When calling browserFiles, at least 1 file is required, ` + `but received ${files}`
+ );
+ }
+ this.files = files;
+ }
+ async load() {
+ const jsonFile = this.files[0];
+ const weightFiles = this.files.slice(1);
+ return new Promise((resolve, reject) => {
+ const jsonReader = new FileReader();
+ jsonReader.onload = (event) => {
+ // tslint:disable-next-line:no-any
+ const modelJSON = JSON.parse(event.target.result);
+ const modelTopology = modelJSON.modelTopology;
+ if (modelTopology == null) {
+ reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));
+ return;
+ }
+ if (weightFiles.length === 0) {
+ resolve({ modelTopology });
+ }
+ const weightsManifest = modelJSON.weightsManifest;
+ if (weightsManifest == null) {
+ reject(new Error(`weightsManifest field is missing from file ${jsonFile.name}`));
+ return;
+ }
+ let pathToFile;
+ try {
+ pathToFile = this.checkManifestAndWeightFiles(weightsManifest, weightFiles);
+ } catch (err) {
+ reject(err);
+ return;
+ }
+ const weightSpecs = [];
+ const paths = [];
+ const perFileBuffers = [];
+ weightsManifest.forEach((weightsGroup) => {
+ weightsGroup.paths.forEach((path) => {
+ paths.push(path);
+ perFileBuffers.push(null);
+ });
+ weightSpecs.push(...weightsGroup.weights);
+ });
+ weightsManifest.forEach((weightsGroup) => {
+ weightsGroup.paths.forEach((path) => {
+ const weightFileReader = new FileReader();
+ weightFileReader.onload = (event) => {
+ // tslint:disable-next-line:no-any
+ const weightData = event.target.result;
+ const index = paths.indexOf(path);
+ perFileBuffers[index] = weightData;
+ if (perFileBuffers.indexOf(null) === -1) {
+ resolve({
+ modelTopology,
+ weightSpecs,
+ weightData: Object(io_utils["d" /* concatenateArrayBuffers */])(
+ perFileBuffers
+ ),
+ format: modelJSON.format,
+ generatedBy: modelJSON.generatedBy,
+ convertedBy: modelJSON.convertedBy,
+ userDefinedMetadata: modelJSON.userDefinedMetadata,
+ });
+ }
+ };
+ weightFileReader.onerror = (error) =>
+ reject(`Failed to read weights data from file of path '${path}'.`);
+ weightFileReader.readAsArrayBuffer(pathToFile[path]);
+ });
+ });
+ };
+ jsonReader.onerror = (error) =>
+ reject(
+ `Failed to read model topology and weights manifest JSON ` +
+ `from file '${jsonFile.name}'. BrowserFiles supports loading ` +
+ `Keras-style tf.Model artifacts only.`
+ );
+ jsonReader.readAsText(jsonFile);
+ });
+ }
+ /**
+ * Check the compatibility between weights manifest and weight files.
+ */
+ checkManifestAndWeightFiles(manifest, files) {
+ const basenames = [];
+ const fileNames = files.map((file) => Object(io_utils["c" /* basename */])(file.name));
+ const pathToFile = {};
+ for (const group of manifest) {
+ group.paths.forEach((path) => {
+ const pathBasename = Object(io_utils["c" /* basename */])(path);
+ if (basenames.indexOf(pathBasename) !== -1) {
+ throw new Error(
+ `Duplicate file basename found in weights manifest: ` + `'${pathBasename}'`
+ );
+ }
+ basenames.push(pathBasename);
+ if (fileNames.indexOf(pathBasename) === -1) {
+ throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);
+ } else {
+ pathToFile[path] = files[fileNames.indexOf(pathBasename)];
+ }
+ });
+ }
+ if (basenames.length !== files.length) {
+ throw new Error(
+ `Mismatch in the number of files in weights manifest ` +
+ `(${basenames.length}) and the number of weight files provided ` +
+ `(${files.length}).`
+ );
+ }
+ return pathToFile;
+ }
+ }
+ const browserDownloadsRouter = (url) => {
+ if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
+ return null;
+ } else {
+ if (!Array.isArray(url) && url.startsWith(browser_files_BrowserDownloads.URL_SCHEME)) {
+ return browserDownloads(url.slice(browser_files_BrowserDownloads.URL_SCHEME.length));
+ } else {
+ return null;
+ }
+ }
+ };
+ IORouterRegistry.registerSaveRouter(browserDownloadsRouter);
+ /**
+ * Creates an IOHandler that triggers file downloads from the browser.
+ *
+ * The returned `IOHandler` instance can be used with model exporting methods such
+ * as `tf.Model.save` and supports only saving.
+ *
+ * ```js
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * const saveResult = await model.save('downloads://mymodel');
+ * // This will trigger downloading of two files:
+ * // 'mymodel.json' and 'mymodel.weights.bin'.
+ * console.log(saveResult);
+ * ```
+ *
+ * @param fileNamePrefix Prefix name of the files to be downloaded. For use with
+ * `tf.Model`, `fileNamePrefix` should follow either of the following two
+ * formats:
+ * 1. `null` or `undefined`, in which case the default file
+ * names will be used:
+ * - 'model.json' for the JSON file containing the model topology and
+ * weights manifest.
+ * - 'model.weights.bin' for the binary file containing the binary weight
+ * values.
+ * 2. A single string or an Array of a single string, as the file name prefix.
+ * For example, if `'foo'` is provided, the downloaded JSON
+ * file and binary weights file will be named 'foo.json' and
+ * 'foo.weights.bin', respectively.
+ * @param config Additional configuration for triggering downloads.
+ * @returns An instance of `BrowserDownloads` `IOHandler`.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Loading',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ function browserDownloads(fileNamePrefix = "model") {
+ return new browser_files_BrowserDownloads(fileNamePrefix);
+ }
+ /**
+ * Creates an IOHandler that loads model artifacts from user-selected files.
+ *
+ * This method can be used for loading from files such as user-selected files
+ * in the browser.
+ * When used in conjunction with `tf.loadLayersModel`, an instance of
+ * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
+ *
+ * ```js
+ * // Note: This code snippet won't run properly without the actual file input
+ * // elements in the HTML DOM.
+ *
+ * // Suppose there are two HTML file input (``)
+ * // elements.
+ * const uploadJSONInput = document.getElementById('upload-json');
+ * const uploadWeightsInput = document.getElementById('upload-weights');
+ * const model = await tf.loadLayersModel(tf.io.browserFiles(
+ * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));
+ * ```
+ *
+ * @param files `File`s to load from. Currently, this function supports only
+ * loading from files that contain Keras-style models (i.e., `tf.Model`s), for
+ * which an `Array` of `File`s is expected (in that order):
+ * - A JSON file containing the model topology and weight manifest.
+ * - Optionally, one or more binary files containing the binary weights.
+ * These files must have names that match the paths in the `weightsManifest`
+ * contained by the aforementioned JSON file, or errors will be thrown
+ * during loading. These weights files have the same format as the ones
+ * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`
+ * Python PIP package. If no weights files are provided, only the model
+ * topology will be loaded from the JSON file above.
+ * @returns An instance of `Files` `IOHandler`.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Loading',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ function browserFiles(files) {
+ return new browser_files_BrowserFiles(files);
+ }
+ //# sourceMappingURL=browser_files.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/progress.js
+ /**
+ * @license
+ * Copyright 2019 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Monitors the progress of a list of promises, firing the onProgress
+ * callback as each promise resolves.
+ *
+ * @param promises Promise list to be monitored.
+ * @param onProgress Callback function, fired each time a promise resolves.
+ * @param startFraction Optional start fraction. Defaults to 0.
+ * @param endFraction Optional end fraction. Defaults to 1.
+ */
+ function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {
+ checkPromises(promises);
+ startFraction = startFraction == null ? 0 : startFraction;
+ endFraction = endFraction == null ? 1 : endFraction;
+ checkFraction(startFraction, endFraction);
+ let resolvedPromise = 0;
+ const registerMonitor = (promise) => {
+ promise.then((value) => {
+ const fraction =
+ startFraction + (++resolvedPromise / promises.length) * (endFraction - startFraction);
+ // pass fraction as parameter to callback function.
+ onProgress(fraction);
+ return value;
+ });
+ return promise;
+ };
+ function checkPromises(promises) {
+ Object(util["assert"])(
+ promises != null && Array.isArray(promises) && promises.length > 0,
+ () => "promises must be a non-empty array"
+ );
+ }
+ function checkFraction(startFraction, endFraction) {
+ Object(util["assert"])(
+ startFraction >= 0 && startFraction <= 1,
+ () =>
+ `Progress fraction must be in range [0, 1], but ` +
+ `got startFraction ${startFraction}`
+ );
+ Object(util["assert"])(
+ endFraction >= 0 && endFraction <= 1,
+ () =>
+ `Progress fraction must be in range [0, 1], but ` + `got endFraction ${endFraction}`
+ );
+ Object(util["assert"])(
+ endFraction >= startFraction,
+ () =>
+ `startFraction must be no more than endFraction, but ` +
+ `got startFraction ${startFraction} and endFraction ` +
+ `${endFraction}`
+ );
+ }
+ return Promise.all(promises.map(registerMonitor));
+ }
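+ /**
+ * Illustrative usage (editor's sketch, hypothetical URLs): report the
+ * fraction of completed requests while several fetches are in flight.
+ *
+ * ```js
+ * const requests = ['w1.bin', 'w2.bin'].map((url) => fetch(url));
+ * const responses = await monitorPromisesProgress(
+ * requests, (fraction) => console.log(`loaded: ${fraction * 100}%`));
+ * ```
+ */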
+ //# sourceMappingURL=progress.js.map
+ // EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/types.js
+ var types = __webpack_require__(34);
+
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Reads binary weights data from a number of URLs.
+ *
+ * @param fetchURLs URLs to fetch the binary weight files from, using `fetch`
+ * calls.
+ * @param loadOptions Optional load options, which may include `requestInit`
+ * (RequestInit options for the HTTP requests), `fetchFunc` (an overriding
+ * `fetch` implementation) and `onProgress` (a progress callback fired
+ * periodically before the load is completed).
+ * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same
+ * length as `fetchURLs`.
+ */
+ async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {
+ if (loadOptions == null) {
+ loadOptions = {};
+ }
+ const fetchFunc =
+ loadOptions.fetchFunc == null
+ ? Object(environment["c" /* env */])().platform.fetch
+ : loadOptions.fetchFunc;
+ // Create the requests for all of the weights in parallel.
+ const requests = fetchURLs.map((fetchURL) =>
+ fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true })
+ );
+ const fetchStartFraction = 0;
+ const fetchEndFraction = 0.5;
+ const responses =
+ loadOptions.onProgress == null
+ ? await Promise.all(requests)
+ : await monitorPromisesProgress(
+ requests,
+ loadOptions.onProgress,
+ fetchStartFraction,
+ fetchEndFraction
+ );
+ const bufferPromises = responses.map((response) => response.arrayBuffer());
+ const bufferStartFraction = 0.5;
+ const bufferEndFraction = 1;
+ const buffers =
+ loadOptions.onProgress == null
+ ? await Promise.all(bufferPromises)
+ : await monitorPromisesProgress(
+ bufferPromises,
+ loadOptions.onProgress,
+ bufferStartFraction,
+ bufferEndFraction
+ );
+ return buffers;
+ }
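+ /**
+ * Illustrative usage (editor's sketch, hypothetical URLs): fetch two weight
+ * shards with combined fetch/read progress reporting; the result is one
+ * ArrayBuffer per URL.
+ *
+ * ```js
+ * const buffers = await loadWeightsAsArrayBuffer(
+ * ['https://example.com/group1-shard1of2.bin',
+ * 'https://example.com/group1-shard2of2.bin'],
+ * {onProgress: (fraction) => console.log(fraction)});
+ * ```
+ */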
+ /**
+ * Reads a weights manifest JSON configuration, fetches the weights and
+ * returns them as `Tensor`s.
+ *
+ * @param manifest The weights manifest JSON.
+ * @param filePathPrefix The path prefix for filenames given in the manifest.
+ * Defaults to the empty string.
+ * @param weightNames The names of the weights to be fetched.
+ */
+ async function loadWeights(manifest, filePathPrefix = "", weightNames, requestInit) {
+ // TODO(nsthorat): Groups are currently fetched atomically. If you need a
+ // single weight from a group, the whole group will be fetched. At a future
+ // date, we should support fetching only the individual shards within a
+ // group that are needed to reconstruct the requested weight.
+ // TODO(cais): Use `decodeWeights` for implementation.
+ const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });
+ const loadWeights = weightsLoaderFactory(fetchWeights);
+ return loadWeights(manifest, filePathPrefix, weightNames);
+ }
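+ /**
+ * Illustrative usage (editor's sketch, hypothetical manifest URL and weight
+ * name): load a single named weight from a hosted manifest into a
+ * name -> Tensor map.
+ *
+ * ```js
+ * const response = await fetch('https://example.com/weights_manifest.json');
+ * const manifest = await response.json();
+ * const weightMap = await loadWeights(
+ * manifest, 'https://example.com', ['dense_1/kernel']);
+ * ```
+ */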
+ /**
+ * Creates a function, which reads a weights manifest JSON configuration,
+ * fetches the weight files using the specified function and returns them as
+ * `Tensor`s.
+ *
+ * ```js
+ * // example for creating a nodejs weight loader, which reads the weight files
+ * // from disk using fs.readFileSync
+ *
+ * import * as fs from 'fs'
+ *
+ * const fetchWeightsFromDisk = (filePaths: string[]) =>
+ * filePaths.map(filePath => fs.readFileSync(filePath).buffer)
+ *
+ * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)
+ *
+ * const manifest = JSON.parse(
+ * fs.readFileSync('./my_model-weights_manifest').toString()
+ * )
+ * const weightMap = await loadWeights(manifest, './')
+ * ```
+ * @param fetchWeightsFunction The function used for fetching the weight files.
+ * @returns Weight loading function.
+ */
+ function weightsLoaderFactory(fetchWeightsFunction) {
+ return async (manifest, filePathPrefix = "", weightNames) => {
+ // Collect all the groups, weights, and their relative offsets to be
+ // fetched.
+ const groupIndicesToFetchMap = manifest.map(() => false);
+ const groupWeightsToFetch = {};
+ const weightsFound = weightNames != null ? weightNames.map(() => false) : [];
+ const allManifestWeightNames = [];
+ manifest.forEach((manifestGroupConfig, groupIndex) => {
+ let groupOffset = 0;
+ manifestGroupConfig.weights.forEach((weightsEntry) => {
+ const rawDtype =
+ "quantization" in weightsEntry
+ ? weightsEntry.quantization.dtype
+ : weightsEntry.dtype;
+ const weightsBytes =
+ types["a" /* DTYPE_VALUE_SIZE_MAP */][rawDtype] *
+ util["sizeFromShape"](weightsEntry.shape);
+ const enqueueWeightsForFetchingFn = () => {
+ groupIndicesToFetchMap[groupIndex] = true;
+ if (groupWeightsToFetch[groupIndex] == null) {
+ groupWeightsToFetch[groupIndex] = [];
+ }
+ groupWeightsToFetch[groupIndex].push({
+ manifestEntry: weightsEntry,
+ groupOffset,
+ sizeBytes: weightsBytes,
+ });
+ };
+ if (weightNames != null) {
+ weightNames.forEach((weightName, weightIndex) => {
+ if (weightName === weightsEntry.name) {
+ enqueueWeightsForFetchingFn();
+ weightsFound[weightIndex] = true;
+ }
+ });
+ } else {
+ enqueueWeightsForFetchingFn();
+ }
+ allManifestWeightNames.push(weightsEntry.name);
+ groupOffset += weightsBytes;
+ });
+ });
+ if (!weightsFound.every((found) => found)) {
+ const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);
+ throw new Error(
+ `Could not find weights in manifest with names: ` +
+ `${weightsNotFound.join(", ")}. \n` +
+ `Manifest JSON has weights with names: ` +
+ `${allManifestWeightNames.join(", ")}.`
+ );
+ }
+ // Convert the one-hot boolean groupId => shouldFetch map to a list of group
+ // IDs.
+ const groupIndicesToFetch = groupIndicesToFetchMap.reduce(
+ (accumulator, shouldFetch, i) => {
+ if (shouldFetch) {
+ accumulator.push(i);
+ }
+ return accumulator;
+ },
+ []
+ );
+ const fetchUrls = [];
+ groupIndicesToFetch.forEach((i) => {
+ manifest[i].paths.forEach((filepath) => {
+ const fetchUrl =
+ filePathPrefix + (!filePathPrefix.endsWith("/") ? "/" : "") + filepath;
+ fetchUrls.push(fetchUrl);
+ });
+ });
+ const buffers = await fetchWeightsFunction(fetchUrls);
+ const weightsTensorMap = {};
+ let bufferIndexOffset = 0;
+ groupIndicesToFetch.forEach((i) => {
+ const numBuffers = manifest[i].paths.length;
+ let groupBytes = 0;
+ for (let i = 0; i < numBuffers; i++) {
+ groupBytes += buffers[bufferIndexOffset + i].byteLength;
+ }
+ // Create a buffer for the whole group.
+ const groupBuffer = new ArrayBuffer(groupBytes);
+ const groupByteBuffer = new Uint8Array(groupBuffer);
+ let groupBufferOffset = 0;
+ for (let i = 0; i < numBuffers; i++) {
+ const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);
+ groupByteBuffer.set(buffer, groupBufferOffset);
+ groupBufferOffset += buffer.byteLength;
+ }
+ const weightsEntries = groupWeightsToFetch[i];
+ weightsEntries.forEach((weightsEntry) => {
+ const byteBuffer = groupBuffer.slice(
+ weightsEntry.groupOffset,
+ weightsEntry.groupOffset + weightsEntry.sizeBytes
+ );
+ const nameToTensorMap = Object(io_utils["e" /* decodeWeights */])(byteBuffer, [
+ weightsEntry.manifestEntry,
+ ]);
+ for (const name in nameToTensorMap) {
+ weightsTensorMap[name] = nameToTensorMap[name];
+ }
+ });
+ bufferIndexOffset += numBuffers;
+ });
+ return weightsTensorMap;
+ };
+ }
+ //# sourceMappingURL=weights_loader.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/http.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * IOHandler implementations based on HTTP requests in the web browser.
+ *
+ * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).
+ */
+
+ const OCTET_STREAM_MIME_TYPE = "application/octet-stream";
+ const JSON_TYPE = "application/json";
+ class http_HTTPRequest {
+ constructor(path, loadOptions) {
+ this.DEFAULT_METHOD = "POST";
+ if (loadOptions == null) {
+ loadOptions = {};
+ }
+ this.weightPathPrefix = loadOptions.weightPathPrefix;
+ this.onProgress = loadOptions.onProgress;
+ if (loadOptions.fetchFunc != null) {
+ Object(util["assert"])(
+ typeof loadOptions.fetchFunc === "function",
+ () =>
+ "Must pass a function that matches the signature of " +
+ "`fetch` (see " +
+ "https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"
+ );
+ this.fetch = loadOptions.fetchFunc;
+ } else {
+ this.fetch = Object(environment["c" /* env */])().platform.fetch;
+ }
+ Object(util["assert"])(
+ path != null && path.length > 0,
+ () => "URL path for http must not be null, undefined or " + "empty."
+ );
+ if (Array.isArray(path)) {
+ Object(util["assert"])(
+ path.length === 2,
+ () =>
+ "URL paths for http must have a length of 2, " +
+ `(actual length is ${path.length}).`
+ );
+ }
+ this.path = path;
+ if (loadOptions.requestInit != null && loadOptions.requestInit.body != null) {
+ throw new Error("requestInit is expected to have no pre-existing body, but has one.");
+ }
+ this.requestInit = loadOptions.requestInit || {};
+ }
+ async save(modelArtifacts) {
+ if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
+ throw new Error(
+ "BrowserHTTPRequest.save() does not support saving model topology " +
+ "in binary formats yet."
+ );
+ }
+ const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);
+ init.body = new FormData();
+ const weightsManifest = [
+ {
+ paths: ["./model.weights.bin"],
+ weights: modelArtifacts.weightSpecs,
+ },
+ ];
+ const modelTopologyAndWeightManifest = {
+ modelTopology: modelArtifacts.modelTopology,
+ format: modelArtifacts.format,
+ generatedBy: modelArtifacts.generatedBy,
+ convertedBy: modelArtifacts.convertedBy,
+ userDefinedMetadata: modelArtifacts.userDefinedMetadata,
+ weightsManifest,
+ };
+ init.body.append(
+ "model.json",
+ new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }),
+ "model.json"
+ );
+ if (modelArtifacts.weightData != null) {
+ init.body.append(
+ "model.weights.bin",
+ new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }),
+ "model.weights.bin"
+ );
+ }
+ const response = await this.fetch(this.path, init);
+ if (response.ok) {
+ return {
+ modelArtifactsInfo: Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
+ modelArtifacts
+ ),
+ responses: [response],
+ };
+ } else {
+ throw new Error(
+ `BrowserHTTPRequest.save() failed due to HTTP response status ` +
+ `${response.status}.`
+ );
+ }
+ }
+ /**
+ * Load model artifacts via HTTP request(s).
+ *
+ * See the documentation to `tf.io.http` for details on the saved
+ * artifacts.
+ *
+ * @returns The loaded model artifacts (if loading succeeds).
+ */
+ async load() {
+ const modelConfigRequest = await this.fetch(this.path, this.requestInit);
+ if (!modelConfigRequest.ok) {
+ throw new Error(
+ `Request to ${this.path} failed with status code ` +
+ `${modelConfigRequest.status}. Please verify this URL points to ` +
+ `the model JSON of the model to load.`
+ );
+ }
+ let modelConfig;
+ try {
+ modelConfig = await modelConfigRequest.json();
+ } catch (e) {
+ let message = `Failed to parse model JSON of response from ${this.path}.`;
+ // TODO(nsthorat): Remove this after some time when we're comfortable that
+ // .pb files are mostly gone.
+ if (this.path.endsWith(".pb")) {
+ message +=
+ " Your path contains a .pb file extension. " +
+ "Support for .pb models has been removed in TensorFlow.js 1.0 " +
+ "in favor of .json models. You can re-convert your Python " +
+ "TensorFlow model using the TensorFlow.js 1.0 conversion scripts " +
+ "or you can convert your .pb models with the 'pb2json' " +
+ "NPM script in the tensorflow/tfjs-converter repository.";
+ } else {
+ message +=
+ " Please make sure the server is serving valid " + "JSON for this request.";
+ }
+ throw new Error(message);
+ }
+ const modelTopology = modelConfig.modelTopology;
+ const weightsManifest = modelConfig.weightsManifest;
+ const generatedBy = modelConfig.generatedBy;
+ const convertedBy = modelConfig.convertedBy;
+ const format = modelConfig.format;
+ const userDefinedMetadata = modelConfig.userDefinedMetadata;
+ // We do not allow both modelTopology and weightsManifest to be missing.
+ if (modelTopology == null && weightsManifest == null) {
+ throw new Error(
+ `The JSON from HTTP path ${this.path} contains neither model ` +
+ `topology nor manifest for weights.`
+ );
+ }
+ let weightSpecs;
+ let weightData;
+ if (weightsManifest != null) {
+ const results = await this.loadWeights(weightsManifest);
+ [weightSpecs, weightData] = results;
+ }
+ return {
+ modelTopology,
+ weightSpecs,
+ weightData,
+ userDefinedMetadata,
+ generatedBy,
+ convertedBy,
+ format,
+ };
+ }
+ async loadWeights(weightsManifest) {
+ const weightPath = Array.isArray(this.path) ? this.path[1] : this.path;
+ const [prefix, suffix] = parseUrl(weightPath);
+ const pathPrefix = this.weightPathPrefix || prefix;
+ const weightSpecs = [];
+ for (const entry of weightsManifest) {
+ weightSpecs.push(...entry.weights);
+ }
+ const fetchURLs = [];
+ weightsManifest.forEach((weightsGroup) => {
+ weightsGroup.paths.forEach((path) => {
+ fetchURLs.push(pathPrefix + path + suffix);
+ });
+ });
+ const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {
+ requestInit: this.requestInit,
+ fetchFunc: this.fetch,
+ onProgress: this.onProgress,
+ });
+ return [weightSpecs, Object(io_utils["d" /* concatenateArrayBuffers */])(buffers)];
+ }
+ }
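+ /**
+ * Illustrative usage (editor's sketch, hypothetical URL): HTTP(S) URLs are
+ * routed to this handler, so loading a hosted model with progress reporting
+ * might look like:
+ *
+ * ```js
+ * const model = await tf.loadLayersModel(
+ * 'https://example.com/model.json',
+ * {onProgress: (fraction) => console.log(fraction)});
+ * ```
+ */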
+ http_HTTPRequest.URL_SCHEME_REGEX = /^https?:\/\//;
+ /**
+ * Extract the prefix and suffix of the url, where the prefix is the path before
+ * the last file, and suffix is the search params after the last file.
+ * ```
+ * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'
+ * [prefix, suffix] = parseUrl(url)
+ * // prefix = 'http://tfhub.dev/model/1/'
+ * // suffix = '?tfjs-format=file'
+ * ```
+ * @param url the model url to be parsed.
+ */
+ function parseUrl(url) {
+ const lastSlash = url.lastIndexOf("/");
+ const lastSearchParam = url.lastIndexOf("?");
+ const prefix = url.substring(0, lastSlash);
+ const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : "";
+ return [prefix + "/", suffix];
+ }
+ function isHTTPScheme(url) {
+ return url.match(http_HTTPRequest.URL_SCHEME_REGEX) != null;
+ }
+ const httpRouter = (url, loadOptions) => {
+ if (
+ typeof fetch === "undefined" &&
+ (loadOptions == null || loadOptions.fetchFunc == null)
+ ) {
+ // `http` uses `fetch` or `node-fetch`; to use it in an environment that
+ // is neither the browser nor Node.js, a global fetch polyfill has to be
+ // set up.
+ return null;
+ } else {
+ let isHTTP = true;
+ if (Array.isArray(url)) {
+ isHTTP = url.every((urlItem) => isHTTPScheme(urlItem));
+ } else {
+ isHTTP = isHTTPScheme(url);
+ }
+ if (isHTTP) {
+ return http(url, loadOptions);
+ }
+ }
+ return null;
+ };
+ IORouterRegistry.registerSaveRouter(httpRouter);
+ IORouterRegistry.registerLoadRouter(httpRouter);
+ /**
+ * Creates an IOHandler subtype that sends model artifacts to HTTP server.
+ *
+ * An HTTP request of the `multipart/form-data` mime type will be sent to the
+ * `path` URL. The form data includes artifacts that represent the topology
+ * and/or weights of the model. In the case of Keras-style `tf.Model`, two
+ * blobs (files) exist in form-data:
+ * - A JSON file consisting of `modelTopology` and `weightsManifest`.
+ * - A binary weights file consisting of the concatenated weight values.
+ * These files are in the same format as the ones generated by
+ * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).
+ *
+ * The following code snippet exemplifies the client-side code that uses this
+ * function:
+ *
+ * ```js
+ * const model = tf.sequential();
+ * model.add(
+ * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
+ *
+ * const saveResult = await model.save(tf.io.http(
+ * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));
+ * console.log(saveResult);
+ * ```
+ *
+ * If the default `POST` method is to be used, without any custom parameters
+ * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:
+ *
+ * ```js
+ * const saveResult = await model.save('http://model-server:5000/upload');
+ * ```
+ *
+ * The following GitHub Gist
+ * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864
+ * implements a server based on [flask](https://github.com/pallets/flask) that
+ * can receive the request. Upon receiving the model artifacts via the request,
+ * this particular server reconstitutes instances of [Keras
+ * Models](https://keras.io/models/model/) in memory.
+ *
+ *
+ * @param path A URL path to the model.
+ * Can be an absolute HTTP path (e.g.,
+ * 'http://localhost:8000/model-upload') or a relative path (e.g.,
+ * './model-upload').
+ * @param requestInit Request configurations to be used when sending
+ * HTTP request to server using `fetch`. It can contain fields such as
+ * `method`, `credentials`, `headers`, `mode`, etc. See
+ * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request
+ * for more information. `requestInit` must not have a body, because the
+ * body will be set by TensorFlow.js. File blobs representing the model
+ * topology (filename: 'model.json') and the weights of the model (filename:
+ * 'model.weights.bin') will be appended to the body. If `requestInit` has a
+ * `body`, an Error will be thrown.
+ * @param loadOptions Optional configuration for the loading. It includes the
+ * following fields:
+ * - weightPathPrefix Optional, this specifies the path prefix for weight
+ * files, by default this is calculated from the path param.
+ * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,
+ * the `fetch` from node-fetch can be used here.
+ * - onProgress Optional, progress callback function, fired periodically
+ * before the load is completed.
+ * @returns An instance of `IOHandler`.
+ */
+ /**
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Loading',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ function http(path, loadOptions) {
+ return new http_HTTPRequest(path, loadOptions);
+ }
+ /**
+ * Deprecated. Use `tf.io.http`.
+ * @param path
+ * @param loadOptions
+ */
+ function browserHTTPRequest(path, loadOptions) {
+ return http(path, loadOptions);
+ }
+ //# sourceMappingURL=http.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ class PassthroughLoader {
+ constructor(modelArtifacts) {
+ this.modelArtifacts = modelArtifacts;
+ }
+ async load() {
+ return this.modelArtifacts;
+ }
+ }
+ class PassthroughSaver {
+ constructor(saveHandler) {
+ this.saveHandler = saveHandler;
+ }
+ async save(modelArtifacts) {
+ return this.saveHandler(modelArtifacts);
+ }
+ }
+ /**
+ * Creates an IOHandler that loads model artifacts from memory.
+ *
+ * When used in conjunction with `tf.loadLayersModel`, an instance of
+ * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
+ *
+ * ```js
+ * const model = await tf.loadLayersModel(tf.io.fromMemory(
+ * modelTopology, weightSpecs, weightData));
+ * ```
+ *
+ * @param modelArtifacts An object containing model topology (i.e., parsed from
+ * the JSON format).
+ * @param weightSpecs An array of `WeightsManifestEntry` objects describing the
+ * names, shapes, types, and quantization of the weight data.
+ * @param weightData A single `ArrayBuffer` containing the weight data,
+ * concatenated in the order described by the weightSpecs.
+ * @param trainingConfig Model training configuration. Optional.
+ *
+ * @returns A passthrough `IOHandler` that simply loads the provided data.
+ */
+ function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {
+ if (arguments.length === 1) {
+ const isModelArtifacts =
+ modelArtifacts.modelTopology != null || modelArtifacts.weightSpecs != null;
+ if (isModelArtifacts) {
+ return new PassthroughLoader(modelArtifacts);
+ } else {
+ // Legacy support: with only modelTopology.
+ // TODO(cais): Remove this deprecated API.
+ console.warn(
+ "Please call tf.io.fromMemory() with only one argument. " +
+ "The argument should be of type ModelArtifacts. " +
+ "The multi-argument signature of tf.io.fromMemory() has been " +
+ "deprecated and will be removed in a future release."
+ );
+ return new PassthroughLoader({ modelTopology: modelArtifacts });
+ }
+ } else {
+ // Legacy support.
+ // TODO(cais): Remove this deprecated API.
+ console.warn(
+ "Please call tf.io.fromMemory() with only one argument. " +
+ "The argument should be of type ModelArtifacts. " +
+ "The multi-argument signature of tf.io.fromMemory() has been " +
+ "deprecated and will be removed in a future release."
+ );
+ return new PassthroughLoader({
+ modelTopology: modelArtifacts,
+ weightSpecs,
+ weightData,
+ trainingConfig,
+ });
+ }
+ }
+ /**
+ * Creates an IOHandler that passes saved model artifacts to a callback.
+ *
+ * ```js
+ * function handleSave(artifacts) {
+ * // ... do something with the artifacts ...
+ * return {modelArtifactsInfo: {...}, ...};
+ * }
+ *
+ * const saveResult = model.save(tf.io.withSaveHandler(handleSave));
+ * ```
+ *
+ * @param saveHandler A function that accepts a `ModelArtifacts` and returns a
+ * `SaveResult`.
+ */
+ function withSaveHandler(saveHandler) {
+ return new PassthroughSaver(saveHandler);
+ }
+ //# sourceMappingURL=passthrough.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/io.js
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Importing local_storage and indexed_db is necessary for the routers to be
+ // registered.
+
+ //# sourceMappingURL=io.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take
+ * value `onValue` (defaults to 1), while all other locations take value
+ * `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank
+ * `R+1` with the last axis of size `depth`.
+ *
+ * ```js
+ * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();
+ * ```
+ *
+ * @param indices `tf.Tensor` of indices with dtype `int32`.
+ * @param depth The depth of the one hot dimension.
+ * @param onValue A number used to fill in the output when the index matches
+ * the location.
+ * @param offValue A number used to fill in the output when the index does
+ * not match the location.
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Creation'} */
+ function oneHot_(indices, depth, onValue = 1, offValue = 0) {
+ if (depth < 2) {
+ throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);
+ }
+ let $indices = Object(tensor_util_env["a" /* convertToTensor */])(
+ indices,
+ "indices",
+ "oneHot",
+ "int32"
+ );
+ const outShape = [...$indices.shape, depth];
+ $indices = $indices.flatten();
+ const forward = (backend, save) => {
+ save([$indices]);
+ return reshape(backend.oneHot($indices, depth, onValue, offValue), outShape);
+ };
+ const inputs = { indices: $indices };
+ const attrs = { depth, onValue, offValue };
+ return engine["a" /* ENGINE */].runKernelFunc(
+ forward,
+ inputs,
+ null /* grad */,
+ kernel_names["cb" /* OneHot */],
+ attrs
+ );
+ }
+ const oneHot = Object(operation["a" /* op */])({ oneHot_ });
+ //# sourceMappingURL=one_hot.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ /**
+ * Computes the confusion matrix from true labels and predicted labels.
+ *
+ * ```js
+ * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
+ * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');
+ * const numClasses = 3;
+ * const out = tf.math.confusionMatrix(labels, predictions, numClasses);
+ * out.print();
+ * // Expected output matrix:
+ * // [[2, 0, 0],
+ * // [0, 1, 1],
+ * // [0, 0, 1]]
+ * ```
+ *
+ * @param labels The target labels, assumed to be 0-based integers
+ * for the classes. The shape is `[numExamples]`, where
+ * `numExamples` is the number of examples included.
+ * @param predictions The predicted classes, assumed to be
+ * 0-based integers for the classes. Must have the same shape as `labels`.
+ * @param numClasses Number of all classes, as an integer.
+ * Its value must be larger than the largest element in `labels` and
+ * `predictions`.
+ * @returns The confusion matrix as a int32-type 2D tensor. The value at
+ * row `r` and column `c` is the number of times examples of actual class
+ * `r` were predicted as class `c`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Evaluation'} */
+ function confusionMatrix_(labels, predictions, numClasses) {
+ const $labels = Object(tensor_util_env["a" /* convertToTensor */])(
+ labels,
+ "labels",
+ "confusionMatrix"
+ );
+ const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(
+ predictions,
+ "predictions",
+ "confusionMatrix"
+ );
+ util["assert"](
+ numClasses == null || (numClasses > 0 && Number.isInteger(numClasses)),
+ () => `If provided, numClasses must be a positive integer, ` + `but got ${numClasses}`
+ );
+ util["assert"](
+ $labels.rank === 1,
+ () => `Expected the rank of labels to be 1, but got ${$labels.rank}`
+ );
+ util["assert"](
+ $predictions.rank === 1,
+ () => `Expected the rank of predictions to be 1, ` + `but got ${$predictions.rank}`
+ );
+ util["assert"](
+ $labels.shape[0] === $predictions.shape[0],
+ () =>
+ `Mismatch in the number of examples: ` +
+ `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +
+ `Labels and predictions should have the same number of elements.`
+ );
+ util["assert"](
+ numClasses > 0 && Number.isInteger(numClasses),
+ () => `numClasses is required to be a positive integer, but got ` + `${numClasses}`
+ );
+ // TODO(cais): In the future, if oneHot supports tensors inputs for
+ // `numClasses`, `confusionMatrix` can make `numClasses` optional.
+ const oneHotLabels = oneHot($labels.asType("int32"), numClasses);
+ const oneHotPredictions = oneHot($predictions.asType("int32"), numClasses);
+ const oneHotLabelsT = oneHotLabels.transpose();
+ return oneHotLabelsT.matMul(oneHotPredictions).asType("int32");
+ }
+ const confusionMatrix = Object(operation["a" /* op */])({ confusionMatrix_ });
+ //# sourceMappingURL=confusion_matrix.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/math.js
+ /**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Exports under the tf.math.* namespace.
+ */
+
+ //# sourceMappingURL=math.js.map
+ // CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/browser.js
+ /**
+ * @license
+ * Copyright 2019 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ let fromPixels2DContext;
+ /**
+ * Creates a `tf.Tensor` from an image.
+ *
+ * ```js
+ * const image = new ImageData(1, 1);
+ * image.data[0] = 100;
+ * image.data[1] = 150;
+ * image.data[2] = 200;
+ * image.data[3] = 255;
+ *
+ * tf.browser.fromPixels(image).print();
+ * ```
+ *
+ * @param pixels The input image to construct the tensor from. The
+ * supported image types are all 4-channel. You can also pass in an image
+ * object with the following attributes:
+ * `{data: Uint8Array; width: number; height: number}`
+ * @param numChannels The number of channels of the output tensor. A
+ * numChannels value less than 4 allows you to ignore channels. Defaults to
+ * 3 (ignores alpha channel of input image).
+ */
+ /** @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true} */
+ function fromPixels_(pixels, numChannels = 3) {
+ // Sanity checks.
+ if (numChannels > 4) {
+ throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");
+ }
+ if (pixels == null) {
+ throw new Error("pixels passed to tf.browser.fromPixels() cannot be null");
+ }
+ let isPixelData = false;
+ let isImageData = false;
+ let isVideo = false;
+ let isImage = false;
+ let isCanvasLike = false;
+ if (pixels.data instanceof Uint8Array) {
+ isPixelData = true;
+ } else if (typeof ImageData !== "undefined" && pixels instanceof ImageData) {
+ isImageData = true;
+ } else if (typeof HTMLVideoElement !== "undefined" && pixels instanceof HTMLVideoElement) {
+ isVideo = true;
+ } else if (typeof HTMLImageElement !== "undefined" && pixels instanceof HTMLImageElement) {
+ isImage = true;
+ // tslint:disable-next-line: no-any
+ } else if (pixels.getContext != null) {
+ isCanvasLike = true;
+ } else {
+ throw new Error(
+ "pixels passed to tf.browser.fromPixels() must be either an " +
+ `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +
+ `in browser, or OffscreenCanvas, ImageData in webworker` +
+ ` or {data: Uint8Array, width: number, height: number}, ` +
+ `but was ${pixels.constructor.name}`
+ );
+ }
+ if (isVideo) {
+ const HAVE_CURRENT_DATA_READY_STATE = 2;
+ if (isVideo && pixels.readyState < HAVE_CURRENT_DATA_READY_STATE) {
+ throw new Error(
+ "The video element has not loaded data yet. Please wait for " +
+ "`loadeddata` event on the