/*!
*
* WebGazer.js: Scalable Webcam EyeTracking Using User Interactions
* Copyright (c) 2016-2020, Brown HCI Group
* Licensed under GPLv3. Companies with a valuation of less than $1M can use WebGazer.js under LGPLv3.
*
*/
var webgazer = /******/ (function (modules) {
  // webpackBootstrap
  // The module cache
  var installedModules = {};

  // The require function
  function __webpack_require__(moduleId) {
    // Check if module is in cache
    if (installedModules[moduleId]) {
      return installedModules[moduleId].exports;
    }
    // Create a new module (and put it into the cache)
    var module = (installedModules[moduleId] = {
      i: moduleId,
      l: false,
      exports: {},
    });

    // Execute the module function
    modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);

    // Flag the module as loaded
    module.l = true;

    // Return the exports of the module
    return module.exports;
  }
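  // Illustrative flow (a sketch, not executed by the bundle): a call such as
  // __webpack_require__(90) first misses the cache, creates
  // { i: 90, l: false, exports: {} }, runs
  // modules[90](module, module.exports, __webpack_require__), flags l = true,
  // and returns module.exports; every later call for id 90 is served from cache.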
  // expose the modules object (__webpack_modules__)
  __webpack_require__.m = modules;

  // expose the module cache
  __webpack_require__.c = installedModules;

  // define getter function for harmony exports
  __webpack_require__.d = function (exports, name, getter) {
    if (!__webpack_require__.o(exports, name)) {
      Object.defineProperty(exports, name, { enumerable: true, get: getter });
    }
  };
  // define __esModule on exports
  __webpack_require__.r = function (exports) {
    if (typeof Symbol !== "undefined" && Symbol.toStringTag) {
      Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
    }
    Object.defineProperty(exports, "__esModule", { value: true });
  };
  // create a fake namespace object
  // mode & 1: value is a module id, require it
  // mode & 2: merge all properties of value into the ns
  // mode & 4: return value when already ns object
  // mode & 8|1: behave like require
  __webpack_require__.t = function (value, mode) {
    if (mode & 1) value = __webpack_require__(value);
    if (mode & 8) return value;
    if (mode & 4 && typeof value === "object" && value && value.__esModule) return value;
    var ns = Object.create(null);
    __webpack_require__.r(ns);
    Object.defineProperty(ns, "default", { enumerable: true, value: value });
    if (mode & 2 && typeof value != "string")
      for (var key in value)
        __webpack_require__.d(
          ns,
          key,
          function (key) {
            return value[key];
          }.bind(null, key)
        );
    return ns;
  };
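  // Worked example (sketch): __webpack_require__.t(id, 7), with mode 7 = 1|2|4,
  // requires the module (bit 1), returns it unchanged if it is already an ES
  // module namespace (bit 4), and otherwise builds a fake namespace: every own
  // enumerable property is re-exposed through a bound getter (bit 2) and the raw
  // value itself becomes ns.default. `id` here is a placeholder, not a real id.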
  // getDefaultExport function for compatibility with non-harmony modules
  __webpack_require__.n = function (module) {
    var getter =
      module && module.__esModule
        ? function getDefault() {
            return module["default"];
          }
        : function getModuleExports() {
            return module;
          };
    __webpack_require__.d(getter, "a", getter);
    return getter;
  };
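  // Typical use (sketch; `someId` is a placeholder): bundled code writes
  //   var dep = __webpack_require__.n(__webpack_require__(someId));
  //   dep.a // the "default" binding, whether the target is ESM or CommonJS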
  // Object.prototype.hasOwnProperty.call
  __webpack_require__.o = function (object, property) {
    return Object.prototype.hasOwnProperty.call(object, property);
  };

  // __webpack_public_path__
  __webpack_require__.p = "";

  // Load entry module and return exports
  return __webpack_require__((__webpack_require__.s = 90));
})(
/************************************************************************/
/******/ [
/* 0 */
/***/ function (module, __webpack_exports__, __webpack_require__) {
"use strict";
// ESM COMPAT FLAG
__webpack_require__.r(__webpack_exports__);
// EXPORTS
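// The registrations below re-export the @tensorflow/tfjs-core API surface
// (optimizers, tensor ops, backend and kernel registries, utility namespaces).
// Each call installs a live getter, so a name resolves against its
// module-scoped identifier only when first read.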
__webpack_require__.d(__webpack_exports__, "AdadeltaOptimizer", function () {
return /* reexport */ adadelta_optimizer_AdadeltaOptimizer;
});
__webpack_require__.d(__webpack_exports__, "AdagradOptimizer", function () {
return /* reexport */ adagrad_optimizer_AdagradOptimizer;
});
__webpack_require__.d(__webpack_exports__, "AdamOptimizer", function () {
return /* reexport */ adam_optimizer_AdamOptimizer;
});
__webpack_require__.d(__webpack_exports__, "AdamaxOptimizer", function () {
return /* reexport */ adamax_optimizer_AdamaxOptimizer;
});
__webpack_require__.d(__webpack_exports__, "MomentumOptimizer", function () {
return /* reexport */ momentum_optimizer_MomentumOptimizer;
});
__webpack_require__.d(__webpack_exports__, "Optimizer", function () {
return /* reexport */ optimizer_Optimizer;
});
__webpack_require__.d(__webpack_exports__, "RMSPropOptimizer", function () {
return /* reexport */ rmsprop_optimizer_RMSPropOptimizer;
});
__webpack_require__.d(__webpack_exports__, "SGDOptimizer", function () {
return /* reexport */ sgd_optimizer_SGDOptimizer;
});
__webpack_require__.d(__webpack_exports__, "Tensor", function () {
return /* reexport */ dist_tensor["a" /* Tensor */];
});
__webpack_require__.d(__webpack_exports__, "TensorBuffer", function () {
return /* reexport */ dist_tensor["b" /* TensorBuffer */];
});
__webpack_require__.d(__webpack_exports__, "Variable", function () {
return /* reexport */ dist_tensor["c" /* Variable */];
});
__webpack_require__.d(__webpack_exports__, "Rank", function () {
return /* reexport */ dist_types["a" /* Rank */];
});
__webpack_require__.d(__webpack_exports__, "sumOutType", function () {
return /* reexport */ dist_types["b" /* sumOutType */];
});
__webpack_require__.d(__webpack_exports__, "upcastType", function () {
return /* reexport */ dist_types["c" /* upcastType */];
});
__webpack_require__.d(__webpack_exports__, "add", function () {
return /* reexport */ add;
});
__webpack_require__.d(__webpack_exports__, "addN", function () {
return /* reexport */ addN;
});
__webpack_require__.d(__webpack_exports__, "atan2", function () {
return /* reexport */ atan2;
});
__webpack_require__.d(__webpack_exports__, "avgPool", function () {
return /* reexport */ avgPool;
});
__webpack_require__.d(__webpack_exports__, "avgPool3d", function () {
return /* reexport */ avgPool3d;
});
__webpack_require__.d(__webpack_exports__, "batchToSpaceND", function () {
return /* reexport */ batchToSpaceND;
});
__webpack_require__.d(__webpack_exports__, "batchNorm", function () {
return /* reexport */ batchNorm;
});
__webpack_require__.d(__webpack_exports__, "batchNorm2d", function () {
return /* reexport */ batchNorm2d;
});
__webpack_require__.d(__webpack_exports__, "batchNorm3d", function () {
return /* reexport */ batchNorm3d;
});
__webpack_require__.d(__webpack_exports__, "batchNorm4d", function () {
return /* reexport */ batchNorm4d;
});
__webpack_require__.d(__webpack_exports__, "broadcastTo", function () {
return /* reexport */ broadcastTo;
});
__webpack_require__.d(__webpack_exports__, "clone", function () {
return /* reexport */ clone;
});
__webpack_require__.d(__webpack_exports__, "complex", function () {
return /* reexport */ complex["a" /* complex */];
});
__webpack_require__.d(__webpack_exports__, "concat", function () {
return /* reexport */ concat;
});
__webpack_require__.d(__webpack_exports__, "concat1d", function () {
return /* reexport */ concat1d;
});
__webpack_require__.d(__webpack_exports__, "concat2d", function () {
return /* reexport */ concat2d;
});
__webpack_require__.d(__webpack_exports__, "concat3d", function () {
return /* reexport */ concat3d;
});
__webpack_require__.d(__webpack_exports__, "concat4d", function () {
return /* reexport */ concat4d;
});
__webpack_require__.d(__webpack_exports__, "conv1d", function () {
return /* reexport */ conv1d;
});
__webpack_require__.d(__webpack_exports__, "conv2d", function () {
return /* reexport */ conv2d;
});
__webpack_require__.d(__webpack_exports__, "conv2dTranspose", function () {
return /* reexport */ conv2dTranspose;
});
__webpack_require__.d(__webpack_exports__, "conv3d", function () {
return /* reexport */ conv3d;
});
__webpack_require__.d(__webpack_exports__, "conv3dTranspose", function () {
return /* reexport */ conv3dTranspose;
});
__webpack_require__.d(__webpack_exports__, "cumsum", function () {
return /* reexport */ cumsum;
});
__webpack_require__.d(__webpack_exports__, "depthToSpace", function () {
return /* reexport */ depthToSpace;
});
__webpack_require__.d(__webpack_exports__, "depthwiseConv2d", function () {
return /* reexport */ depthwiseConv2d;
});
__webpack_require__.d(__webpack_exports__, "diag", function () {
return /* reexport */ diag;
});
__webpack_require__.d(__webpack_exports__, "div", function () {
return /* reexport */ div;
});
__webpack_require__.d(__webpack_exports__, "divNoNan", function () {
return /* reexport */ divNoNan;
});
__webpack_require__.d(__webpack_exports__, "dot", function () {
return /* reexport */ dot;
});
__webpack_require__.d(__webpack_exports__, "elu", function () {
return /* reexport */ elu;
});
__webpack_require__.d(__webpack_exports__, "equal", function () {
return /* reexport */ equal;
});
__webpack_require__.d(__webpack_exports__, "eye", function () {
return /* reexport */ eye;
});
__webpack_require__.d(__webpack_exports__, "fill", function () {
return /* reexport */ fill;
});
__webpack_require__.d(__webpack_exports__, "floorDiv", function () {
return /* reexport */ floorDiv;
});
__webpack_require__.d(__webpack_exports__, "greater", function () {
return /* reexport */ greater;
});
__webpack_require__.d(__webpack_exports__, "greaterEqual", function () {
return /* reexport */ greaterEqual;
});
__webpack_require__.d(__webpack_exports__, "imag", function () {
return /* reexport */ imag["a" /* imag */];
});
__webpack_require__.d(__webpack_exports__, "leakyRelu", function () {
return /* reexport */ leakyRelu;
});
__webpack_require__.d(__webpack_exports__, "less", function () {
return /* reexport */ less;
});
__webpack_require__.d(__webpack_exports__, "lessEqual", function () {
return /* reexport */ lessEqual;
});
__webpack_require__.d(__webpack_exports__, "localResponseNormalization", function () {
return /* reexport */ localResponseNormalization;
});
__webpack_require__.d(__webpack_exports__, "matMul", function () {
return /* reexport */ matMul;
});
__webpack_require__.d(__webpack_exports__, "max", function () {
return /* reexport */ max_max;
});
__webpack_require__.d(__webpack_exports__, "maxPool", function () {
return /* reexport */ maxPool;
});
__webpack_require__.d(__webpack_exports__, "maxPool3d", function () {
return /* reexport */ maxPool3d;
});
__webpack_require__.d(__webpack_exports__, "maxPoolWithArgmax", function () {
return /* reexport */ maxPoolWithArgmax;
});
__webpack_require__.d(__webpack_exports__, "maximum", function () {
return /* reexport */ maximum;
});
__webpack_require__.d(__webpack_exports__, "minimum", function () {
return /* reexport */ minimum;
});
__webpack_require__.d(__webpack_exports__, "mod", function () {
return /* reexport */ mod;
});
__webpack_require__.d(__webpack_exports__, "mul", function () {
return /* reexport */ mul;
});
__webpack_require__.d(__webpack_exports__, "multinomial", function () {
return /* reexport */ multinomial;
});
__webpack_require__.d(__webpack_exports__, "notEqual", function () {
return /* reexport */ notEqual;
});
__webpack_require__.d(__webpack_exports__, "oneHot", function () {
return /* reexport */ oneHot;
});
__webpack_require__.d(__webpack_exports__, "outerProduct", function () {
return /* reexport */ outerProduct;
});
__webpack_require__.d(__webpack_exports__, "pad", function () {
return /* reexport */ pad_pad;
});
__webpack_require__.d(__webpack_exports__, "pad1d", function () {
return /* reexport */ pad1d;
});
__webpack_require__.d(__webpack_exports__, "pad2d", function () {
return /* reexport */ pad2d;
});
__webpack_require__.d(__webpack_exports__, "pad3d", function () {
return /* reexport */ pad3d;
});
__webpack_require__.d(__webpack_exports__, "pad4d", function () {
return /* reexport */ pad4d;
});
__webpack_require__.d(__webpack_exports__, "pool", function () {
return /* reexport */ pool;
});
__webpack_require__.d(__webpack_exports__, "pow", function () {
return /* reexport */ pow;
});
__webpack_require__.d(__webpack_exports__, "prelu", function () {
return /* reexport */ prelu;
});
__webpack_require__.d(__webpack_exports__, "rand", function () {
return /* reexport */ rand;
});
__webpack_require__.d(__webpack_exports__, "randomGamma", function () {
return /* reexport */ randomGamma;
});
__webpack_require__.d(__webpack_exports__, "randomNormal", function () {
return /* reexport */ randomNormal;
});
__webpack_require__.d(__webpack_exports__, "randomUniform", function () {
return /* reexport */ randomUniform;
});
__webpack_require__.d(__webpack_exports__, "real", function () {
return /* reexport */ real["a" /* real */];
});
__webpack_require__.d(__webpack_exports__, "relu", function () {
return /* reexport */ relu;
});
__webpack_require__.d(__webpack_exports__, "relu6", function () {
return /* reexport */ relu6;
});
__webpack_require__.d(__webpack_exports__, "selu", function () {
return /* reexport */ selu;
});
__webpack_require__.d(__webpack_exports__, "separableConv2d", function () {
return /* reexport */ separableConv2d;
});
__webpack_require__.d(__webpack_exports__, "spaceToBatchND", function () {
return /* reexport */ spaceToBatchND;
});
__webpack_require__.d(__webpack_exports__, "split", function () {
return /* reexport */ split;
});
__webpack_require__.d(__webpack_exports__, "square", function () {
return /* reexport */ square;
});
__webpack_require__.d(__webpack_exports__, "squaredDifference", function () {
return /* reexport */ squaredDifference;
});
__webpack_require__.d(__webpack_exports__, "sub", function () {
return /* reexport */ sub;
});
__webpack_require__.d(__webpack_exports__, "tile", function () {
return /* reexport */ tile;
});
__webpack_require__.d(__webpack_exports__, "truncatedNormal", function () {
return /* reexport */ truncatedNormal;
});
__webpack_require__.d(__webpack_exports__, "booleanMaskAsync", function () {
return /* reexport */ booleanMaskAsync;
});
__webpack_require__.d(__webpack_exports__, "reverse", function () {
return /* reexport */ reverse_reverse;
});
__webpack_require__.d(__webpack_exports__, "reverse1d", function () {
return /* reexport */ reverse1d;
});
__webpack_require__.d(__webpack_exports__, "reverse2d", function () {
return /* reexport */ reverse2d;
});
__webpack_require__.d(__webpack_exports__, "reverse3d", function () {
return /* reexport */ reverse3d;
});
__webpack_require__.d(__webpack_exports__, "reverse4d", function () {
return /* reexport */ reverse4d;
});
__webpack_require__.d(__webpack_exports__, "slice", function () {
return /* reexport */ slice;
});
__webpack_require__.d(__webpack_exports__, "slice1d", function () {
return /* reexport */ slice1d;
});
__webpack_require__.d(__webpack_exports__, "slice2d", function () {
return /* reexport */ slice2d;
});
__webpack_require__.d(__webpack_exports__, "slice3d", function () {
return /* reexport */ slice3d;
});
__webpack_require__.d(__webpack_exports__, "slice4d", function () {
return /* reexport */ slice4d;
});
__webpack_require__.d(__webpack_exports__, "abs", function () {
return /* reexport */ abs;
});
__webpack_require__.d(__webpack_exports__, "acos", function () {
return /* reexport */ acos;
});
__webpack_require__.d(__webpack_exports__, "acosh", function () {
return /* reexport */ acosh;
});
__webpack_require__.d(__webpack_exports__, "asin", function () {
return /* reexport */ asin;
});
__webpack_require__.d(__webpack_exports__, "asinh", function () {
return /* reexport */ asinh;
});
__webpack_require__.d(__webpack_exports__, "atan", function () {
return /* reexport */ atan;
});
__webpack_require__.d(__webpack_exports__, "atanh", function () {
return /* reexport */ atanh;
});
__webpack_require__.d(__webpack_exports__, "ceil", function () {
return /* reexport */ ceil;
});
__webpack_require__.d(__webpack_exports__, "clipByValue", function () {
return /* reexport */ clipByValue;
});
__webpack_require__.d(__webpack_exports__, "cos", function () {
return /* reexport */ cos;
});
__webpack_require__.d(__webpack_exports__, "cosh", function () {
return /* reexport */ cosh;
});
__webpack_require__.d(__webpack_exports__, "erf", function () {
return /* reexport */ erf;
});
__webpack_require__.d(__webpack_exports__, "exp", function () {
return /* reexport */ unary_ops_exp;
});
__webpack_require__.d(__webpack_exports__, "expm1", function () {
return /* reexport */ expm1;
});
__webpack_require__.d(__webpack_exports__, "floor", function () {
return /* reexport */ floor;
});
__webpack_require__.d(__webpack_exports__, "log", function () {
return /* reexport */ log;
});
__webpack_require__.d(__webpack_exports__, "log1p", function () {
return /* reexport */ log1p;
});
__webpack_require__.d(__webpack_exports__, "logSigmoid", function () {
return /* reexport */ logSigmoid;
});
__webpack_require__.d(__webpack_exports__, "neg", function () {
return /* reexport */ neg;
});
__webpack_require__.d(__webpack_exports__, "reciprocal", function () {
return /* reexport */ reciprocal;
});
__webpack_require__.d(__webpack_exports__, "round", function () {
return /* reexport */ round;
});
__webpack_require__.d(__webpack_exports__, "rsqrt", function () {
return /* reexport */ rsqrt;
});
__webpack_require__.d(__webpack_exports__, "sigmoid", function () {
return /* reexport */ sigmoid;
});
__webpack_require__.d(__webpack_exports__, "sign", function () {
return /* reexport */ sign;
});
__webpack_require__.d(__webpack_exports__, "isNaN", function () {
return /* reexport */ unary_ops_isNaN;
});
__webpack_require__.d(__webpack_exports__, "isInf", function () {
return /* reexport */ isInf;
});
__webpack_require__.d(__webpack_exports__, "isFinite", function () {
return /* reexport */ unary_ops_isFinite;
});
__webpack_require__.d(__webpack_exports__, "sin", function () {
return /* reexport */ sin;
});
__webpack_require__.d(__webpack_exports__, "sinh", function () {
return /* reexport */ sinh;
});
__webpack_require__.d(__webpack_exports__, "softplus", function () {
return /* reexport */ softplus;
});
__webpack_require__.d(__webpack_exports__, "sqrt", function () {
return /* reexport */ sqrt;
});
__webpack_require__.d(__webpack_exports__, "step", function () {
return /* reexport */ unary_ops_step;
});
__webpack_require__.d(__webpack_exports__, "tan", function () {
return /* reexport */ tan;
});
__webpack_require__.d(__webpack_exports__, "tanh", function () {
return /* reexport */ tanh;
});
__webpack_require__.d(__webpack_exports__, "all", function () {
return /* reexport */ reduction_ops_all;
});
__webpack_require__.d(__webpack_exports__, "any", function () {
return /* reexport */ any;
});
__webpack_require__.d(__webpack_exports__, "argMax", function () {
return /* reexport */ argMax;
});
__webpack_require__.d(__webpack_exports__, "argMin", function () {
return /* reexport */ argMin;
});
__webpack_require__.d(__webpack_exports__, "logSumExp", function () {
return /* reexport */ logSumExp;
});
__webpack_require__.d(__webpack_exports__, "mean", function () {
return /* reexport */ reduction_ops_mean;
});
__webpack_require__.d(__webpack_exports__, "min", function () {
return /* reexport */ reduction_ops_min;
});
__webpack_require__.d(__webpack_exports__, "moments", function () {
return /* reexport */ moments;
});
__webpack_require__.d(__webpack_exports__, "sum", function () {
return /* reexport */ sum;
});
__webpack_require__.d(__webpack_exports__, "prod", function () {
return /* reexport */ reduction_ops_prod;
});
__webpack_require__.d(__webpack_exports__, "equalStrict", function () {
return /* reexport */ equalStrict;
});
__webpack_require__.d(__webpack_exports__, "greaterEqualStrict", function () {
return /* reexport */ greaterEqualStrict;
});
__webpack_require__.d(__webpack_exports__, "greaterStrict", function () {
return /* reexport */ greaterStrict;
});
__webpack_require__.d(__webpack_exports__, "lessEqualStrict", function () {
return /* reexport */ lessEqualStrict;
});
__webpack_require__.d(__webpack_exports__, "lessStrict", function () {
return /* reexport */ lessStrict;
});
__webpack_require__.d(__webpack_exports__, "notEqualStrict", function () {
return /* reexport */ notEqualStrict;
});
__webpack_require__.d(__webpack_exports__, "addStrict", function () {
return /* reexport */ addStrict;
});
__webpack_require__.d(__webpack_exports__, "divStrict", function () {
return /* reexport */ divStrict;
});
__webpack_require__.d(__webpack_exports__, "maximumStrict", function () {
return /* reexport */ maximumStrict;
});
__webpack_require__.d(__webpack_exports__, "minimumStrict", function () {
return /* reexport */ minimumStrict;
});
__webpack_require__.d(__webpack_exports__, "modStrict", function () {
return /* reexport */ modStrict;
});
__webpack_require__.d(__webpack_exports__, "mulStrict", function () {
return /* reexport */ mulStrict;
});
__webpack_require__.d(__webpack_exports__, "powStrict", function () {
return /* reexport */ powStrict;
});
__webpack_require__.d(__webpack_exports__, "squaredDifferenceStrict", function () {
return /* reexport */ squaredDifferenceStrict;
});
__webpack_require__.d(__webpack_exports__, "subStrict", function () {
return /* reexport */ subStrict;
});
__webpack_require__.d(__webpack_exports__, "logicalAnd", function () {
return /* reexport */ logicalAnd;
});
__webpack_require__.d(__webpack_exports__, "logicalNot", function () {
return /* reexport */ logicalNot;
});
__webpack_require__.d(__webpack_exports__, "logicalOr", function () {
return /* reexport */ logicalOr;
});
__webpack_require__.d(__webpack_exports__, "logicalXor", function () {
return /* reexport */ logicalXor;
});
__webpack_require__.d(__webpack_exports__, "where", function () {
return /* reexport */ where;
});
__webpack_require__.d(__webpack_exports__, "whereAsync", function () {
return /* reexport */ whereAsync;
});
__webpack_require__.d(__webpack_exports__, "buffer", function () {
return /* reexport */ array_ops_buffer;
});
__webpack_require__.d(__webpack_exports__, "print", function () {
return /* reexport */ print;
});
__webpack_require__.d(__webpack_exports__, "cast", function () {
return /* reexport */ cast;
});
__webpack_require__.d(__webpack_exports__, "expandDims", function () {
return /* reexport */ expandDims;
});
__webpack_require__.d(__webpack_exports__, "reshape", function () {
return /* reexport */ reshape;
});
__webpack_require__.d(__webpack_exports__, "squeeze", function () {
return /* reexport */ squeeze;
});
__webpack_require__.d(__webpack_exports__, "stack", function () {
return /* reexport */ stack;
});
__webpack_require__.d(__webpack_exports__, "unstack", function () {
return /* reexport */ unstack;
});
__webpack_require__.d(__webpack_exports__, "setdiff1dAsync", function () {
return /* reexport */ setdiff1dAsync;
});
__webpack_require__.d(__webpack_exports__, "linspace", function () {
return /* reexport */ tensor_ops["a" /* linspace */];
});
__webpack_require__.d(__webpack_exports__, "ones", function () {
return /* reexport */ tensor_ops["b" /* ones */];
});
__webpack_require__.d(__webpack_exports__, "range", function () {
return /* reexport */ tensor_ops["d" /* range */];
});
__webpack_require__.d(__webpack_exports__, "scalar", function () {
return /* reexport */ tensor_ops["e" /* scalar */];
});
__webpack_require__.d(__webpack_exports__, "tensor", function () {
return /* reexport */ tensor_ops["f" /* tensor */];
});
__webpack_require__.d(__webpack_exports__, "tensor1d", function () {
return /* reexport */ tensor_ops["g" /* tensor1d */];
});
__webpack_require__.d(__webpack_exports__, "tensor2d", function () {
return /* reexport */ tensor_ops["h" /* tensor2d */];
});
__webpack_require__.d(__webpack_exports__, "tensor3d", function () {
return /* reexport */ tensor_ops["i" /* tensor3d */];
});
__webpack_require__.d(__webpack_exports__, "tensor4d", function () {
return /* reexport */ tensor_ops["j" /* tensor4d */];
});
__webpack_require__.d(__webpack_exports__, "tensor5d", function () {
return /* reexport */ tensor_ops["k" /* tensor5d */];
});
__webpack_require__.d(__webpack_exports__, "tensor6d", function () {
return /* reexport */ tensor_ops["l" /* tensor6d */];
});
__webpack_require__.d(__webpack_exports__, "variable", function () {
return /* reexport */ tensor_ops["m" /* variable */];
});
__webpack_require__.d(__webpack_exports__, "zeros", function () {
return /* reexport */ tensor_ops["n" /* zeros */];
});
__webpack_require__.d(__webpack_exports__, "onesLike", function () {
return /* reexport */ tensor_ops["c" /* onesLike */];
});
__webpack_require__.d(__webpack_exports__, "zerosLike", function () {
return /* reexport */ tensor_ops["o" /* zerosLike */];
});
__webpack_require__.d(__webpack_exports__, "transpose", function () {
return /* reexport */ transpose;
});
__webpack_require__.d(__webpack_exports__, "softmax", function () {
return /* reexport */ softmax;
});
__webpack_require__.d(__webpack_exports__, "logSoftmax", function () {
return /* reexport */ logSoftmax;
});
__webpack_require__.d(__webpack_exports__, "norm", function () {
return /* reexport */ norm_norm;
});
__webpack_require__.d(__webpack_exports__, "gather", function () {
return /* reexport */ gather;
});
__webpack_require__.d(__webpack_exports__, "unsortedSegmentSum", function () {
return /* reexport */ unsortedSegmentSum;
});
__webpack_require__.d(__webpack_exports__, "basicLSTMCell", function () {
return /* reexport */ basicLSTMCell;
});
__webpack_require__.d(__webpack_exports__, "multiRNNCell", function () {
return /* reexport */ multiRNNCell;
});
__webpack_require__.d(__webpack_exports__, "movingAverage", function () {
return /* reexport */ movingAverage;
});
__webpack_require__.d(__webpack_exports__, "stridedSlice", function () {
return /* reexport */ stridedSlice;
});
__webpack_require__.d(__webpack_exports__, "topk", function () {
return /* reexport */ topk;
});
__webpack_require__.d(__webpack_exports__, "scatterND", function () {
return /* reexport */ scatterND;
});
__webpack_require__.d(__webpack_exports__, "fft", function () {
return /* reexport */ fft;
});
__webpack_require__.d(__webpack_exports__, "ifft", function () {
return /* reexport */ ifft;
});
__webpack_require__.d(__webpack_exports__, "rfft", function () {
return /* reexport */ rfft;
});
__webpack_require__.d(__webpack_exports__, "irfft", function () {
return /* reexport */ irfft;
});
__webpack_require__.d(__webpack_exports__, "sparseToDense", function () {
return /* reexport */ sparseToDense;
});
__webpack_require__.d(__webpack_exports__, "gatherND", function () {
return /* reexport */ gatherND;
});
__webpack_require__.d(__webpack_exports__, "dropout", function () {
return /* reexport */ dropout;
});
__webpack_require__.d(__webpack_exports__, "hannWindow", function () {
return /* reexport */ hannWindow;
});
__webpack_require__.d(__webpack_exports__, "hammingWindow", function () {
return /* reexport */ hammingWindow;
});
__webpack_require__.d(__webpack_exports__, "frame", function () {
return /* reexport */ signal_ops_frame;
});
__webpack_require__.d(__webpack_exports__, "stft", function () {
return /* reexport */ stft;
});
__webpack_require__.d(__webpack_exports__, "inTopKAsync", function () {
return /* reexport */ inTopKAsync;
});
__webpack_require__.d(__webpack_exports__, "op", function () {
return /* reexport */ operation["a" /* op */];
});
__webpack_require__.d(__webpack_exports__, "image", function () {
return /* reexport */ image_ops_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "linalg", function () {
return /* reexport */ linalg_ops_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "losses", function () {
return /* reexport */ loss_ops_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "spectral", function () {
return /* reexport */ spectral_ops_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "fused", function () {
return /* reexport */ fused_ops_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "signal", function () {
return /* reexport */ signal_ops_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "Reduction", function () {
return /* reexport */ Reduction;
});
__webpack_require__.d(__webpack_exports__, "train", function () {
return /* reexport */ train;
});
__webpack_require__.d(__webpack_exports__, "enableProdMode", function () {
return /* reexport */ enableProdMode;
});
__webpack_require__.d(__webpack_exports__, "enableDebugMode", function () {
return /* reexport */ enableDebugMode;
});
__webpack_require__.d(__webpack_exports__, "disableDeprecationWarnings", function () {
return /* reexport */ disableDeprecationWarnings;
});
__webpack_require__.d(__webpack_exports__, "deprecationWarn", function () {
return /* reexport */ deprecationWarn;
});
__webpack_require__.d(__webpack_exports__, "disposeVariables", function () {
return /* reexport */ disposeVariables;
});
__webpack_require__.d(__webpack_exports__, "engine", function () {
return /* reexport */ globals_engine;
});
__webpack_require__.d(__webpack_exports__, "memory", function () {
return /* reexport */ memory;
});
__webpack_require__.d(__webpack_exports__, "profile", function () {
return /* reexport */ profile;
});
__webpack_require__.d(__webpack_exports__, "tidy", function () {
return /* reexport */ tidy;
});
__webpack_require__.d(__webpack_exports__, "dispose", function () {
return /* reexport */ dispose;
});
__webpack_require__.d(__webpack_exports__, "keep", function () {
return /* reexport */ keep;
});
__webpack_require__.d(__webpack_exports__, "time", function () {
return /* reexport */ time;
});
__webpack_require__.d(__webpack_exports__, "setBackend", function () {
return /* reexport */ setBackend;
});
__webpack_require__.d(__webpack_exports__, "ready", function () {
return /* reexport */ ready;
});
__webpack_require__.d(__webpack_exports__, "getBackend", function () {
return /* reexport */ getBackend;
});
__webpack_require__.d(__webpack_exports__, "removeBackend", function () {
return /* reexport */ removeBackend;
});
__webpack_require__.d(__webpack_exports__, "findBackend", function () {
return /* reexport */ findBackend;
});
__webpack_require__.d(__webpack_exports__, "findBackendFactory", function () {
return /* reexport */ findBackendFactory;
});
__webpack_require__.d(__webpack_exports__, "registerBackend", function () {
return /* reexport */ registerBackend;
});
__webpack_require__.d(__webpack_exports__, "backend", function () {
return /* reexport */ globals_backend;
});
__webpack_require__.d(__webpack_exports__, "setPlatform", function () {
return /* reexport */ setPlatform;
});
__webpack_require__.d(__webpack_exports__, "getKernel", function () {
return /* reexport */ kernel_registry["b" /* getKernel */];
});
__webpack_require__.d(__webpack_exports__, "getGradient", function () {
return /* reexport */ kernel_registry["a" /* getGradient */];
});
__webpack_require__.d(__webpack_exports__, "getKernelsForBackend", function () {
return /* reexport */ kernel_registry["c" /* getKernelsForBackend */];
});
__webpack_require__.d(__webpack_exports__, "registerKernel", function () {
return /* reexport */ kernel_registry["e" /* registerKernel */];
});
__webpack_require__.d(__webpack_exports__, "registerGradient", function () {
return /* reexport */ kernel_registry["d" /* registerGradient */];
});
__webpack_require__.d(__webpack_exports__, "unregisterKernel", function () {
return /* reexport */ kernel_registry["g" /* unregisterKernel */];
});
__webpack_require__.d(__webpack_exports__, "unregisterGradient", function () {
return /* reexport */ kernel_registry["f" /* unregisterGradient */];
});
__webpack_require__.d(__webpack_exports__, "customGrad", function () {
return /* reexport */ customGrad;
});
__webpack_require__.d(__webpack_exports__, "grad", function () {
return /* reexport */ gradients_grad;
});
__webpack_require__.d(__webpack_exports__, "grads", function () {
return /* reexport */ gradients_grads;
});
__webpack_require__.d(__webpack_exports__, "valueAndGrad", function () {
return /* reexport */ valueAndGrad;
});
__webpack_require__.d(__webpack_exports__, "valueAndGrads", function () {
return /* reexport */ valueAndGrads;
});
__webpack_require__.d(__webpack_exports__, "variableGrads", function () {
return /* reexport */ variableGrads;
});
__webpack_require__.d(__webpack_exports__, "Environment", function () {
return /* reexport */ environment["b" /* Environment */];
});
__webpack_require__.d(__webpack_exports__, "env", function () {
return /* reexport */ environment["c" /* env */];
});
__webpack_require__.d(__webpack_exports__, "ENV", function () {
return /* reexport */ environment["a" /* ENV */];
});
__webpack_require__.d(__webpack_exports__, "version_core", function () {
return /* reexport */ version;
});
__webpack_require__.d(__webpack_exports__, "nextFrame", function () {
return /* reexport */ browser_util["a" /* nextFrame */];
});
__webpack_require__.d(__webpack_exports__, "browser", function () {
return /* reexport */ browser_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "io", function () {
return /* reexport */ io_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "math", function () {
return /* reexport */ math_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "serialization", function () {
return /* reexport */ serialization_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "test_util", function () {
return /* reexport */ test_util_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "util", function () {
return /* reexport */ util;
});
__webpack_require__.d(__webpack_exports__, "backend_util", function () {
return /* reexport */ backend_util_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "tensor_util", function () {
return /* reexport */ tensor_util;
});
__webpack_require__.d(__webpack_exports__, "slice_util", function () {
return /* reexport */ slice_util_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "gather_util", function () {
return /* reexport */ gather_nd_util_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "scatter_util", function () {
return /* reexport */ scatter_nd_util_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "device_util", function () {
return /* reexport */ device_util;
});
__webpack_require__.d(__webpack_exports__, "kernel_impls", function () {
return /* reexport */ kernel_impls_namespaceObject;
});
__webpack_require__.d(__webpack_exports__, "KernelBackend", function () {
return /* reexport */ KernelBackend;
});
__webpack_require__.d(__webpack_exports__, "DataStorage", function () {
return /* reexport */ DataStorage;
});
__webpack_require__.d(__webpack_exports__, "Add", function () {
return /* reexport */ kernel_names["a" /* Add */];
});
__webpack_require__.d(__webpack_exports__, "AddN", function () {
return /* reexport */ kernel_names["b" /* AddN */];
});
__webpack_require__.d(__webpack_exports__, "Atan2", function () {
return /* reexport */ kernel_names["c" /* Atan2 */];
});
__webpack_require__.d(__webpack_exports__, "AvgPool", function () {
return /* reexport */ kernel_names["d" /* AvgPool */];
});
__webpack_require__.d(__webpack_exports__, "AvgPoolBackprop", function () {
return /* reexport */ kernel_names["g" /* AvgPoolBackprop */];
});
__webpack_require__.d(__webpack_exports__, "AvgPool3D", function () {
return /* reexport */ kernel_names["e" /* AvgPool3D */];
});
__webpack_require__.d(__webpack_exports__, "AvgPool3DBackprop", function () {
return /* reexport */ kernel_names["f" /* AvgPool3DBackprop */];
});
__webpack_require__.d(__webpack_exports__, "BatchMatMul", function () {
return /* reexport */ kernel_names["h" /* BatchMatMul */];
});
__webpack_require__.d(__webpack_exports__, "BatchToSpaceND", function () {
return /* reexport */ kernel_names["i" /* BatchToSpaceND */];
});
__webpack_require__.d(__webpack_exports__, "BroadcastTo", function () {
return /* reexport */ kernel_names["j" /* BroadcastTo */];
});
__webpack_require__.d(__webpack_exports__, "Complex", function () {
return /* reexport */ kernel_names["k" /* Complex */];
});
__webpack_require__.d(__webpack_exports__, "Concat", function () {
return /* reexport */ kernel_names["l" /* Concat */];
});
__webpack_require__.d(__webpack_exports__, "Conv2D", function () {
return /* reexport */ kernel_names["m" /* Conv2D */];
});
__webpack_require__.d(__webpack_exports__, "Conv2DBackpropFilter", function () {
return /* reexport */ kernel_names["n" /* Conv2DBackpropFilter */];
});
__webpack_require__.d(__webpack_exports__, "Conv2DBackpropInput", function () {
return /* reexport */ kernel_names["o" /* Conv2DBackpropInput */];
});
__webpack_require__.d(__webpack_exports__, "Conv3D", function () {
return /* reexport */ kernel_names["p" /* Conv3D */];
});
__webpack_require__.d(__webpack_exports__, "Conv3DBackpropFilterV2", function () {
return /* reexport */ kernel_names["q" /* Conv3DBackpropFilterV2 */];
});
__webpack_require__.d(__webpack_exports__, "Conv3DBackpropInputV2", function () {
return /* reexport */ kernel_names["r" /* Conv3DBackpropInputV2 */];
});
__webpack_require__.d(__webpack_exports__, "Cumsum", function () {
return /* reexport */ kernel_names["s" /* Cumsum */];
});
__webpack_require__.d(__webpack_exports__, "DepthToSpace", function () {
return /* reexport */ kernel_names["t" /* DepthToSpace */];
});
__webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNative", function () {
return /* reexport */ kernel_names["u" /* DepthwiseConv2dNative */];
});
__webpack_require__.d(
__webpack_exports__,
"DepthwiseConv2dNativeBackpropFilter",
function () {
return /* reexport */ kernel_names["v" /* DepthwiseConv2dNativeBackpropFilter */];
}
);
__webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNativeBackpropInput", function () {
return /* reexport */ kernel_names["w" /* DepthwiseConv2dNativeBackpropInput */];
});
__webpack_require__.d(__webpack_exports__, "Diag", function () {
return /* reexport */ kernel_names["x" /* Diag */];
});
__webpack_require__.d(__webpack_exports__, "Div", function () {
return /* reexport */ kernel_names["y" /* Div */];
});
__webpack_require__.d(__webpack_exports__, "Elu", function () {
return /* reexport */ kernel_names["z" /* Elu */];
});
__webpack_require__.d(__webpack_exports__, "EluGrad", function () {
return /* reexport */ kernel_names["A" /* EluGrad */];
});
__webpack_require__.d(__webpack_exports__, "Equal", function () {
return /* reexport */ kernel_names["B" /* Equal */];
});
__webpack_require__.d(__webpack_exports__, "FloorDiv", function () {
return /* reexport */ kernel_names["D" /* FloorDiv */];
});
__webpack_require__.d(__webpack_exports__, "Fill", function () {
return /* reexport */ kernel_names["C" /* Fill */];
});
__webpack_require__.d(__webpack_exports__, "FusedBatchNorm", function () {
return /* reexport */ kernel_names["F" /* FusedBatchNorm */];
});
__webpack_require__.d(__webpack_exports__, "GatherNd", function () {
return /* reexport */ kernel_names["G" /* GatherNd */];
});
__webpack_require__.d(__webpack_exports__, "Greater", function () {
return /* reexport */ kernel_names["H" /* Greater */];
});
__webpack_require__.d(__webpack_exports__, "GreaterEqual", function () {
return /* reexport */ kernel_names["I" /* GreaterEqual */];
});
__webpack_require__.d(__webpack_exports__, "Identity", function () {
return /* reexport */ kernel_names["J" /* Identity */];
});
__webpack_require__.d(__webpack_exports__, "Imag", function () {
return /* reexport */ kernel_names["K" /* Imag */];
});
__webpack_require__.d(__webpack_exports__, "Less", function () {
return /* reexport */ kernel_names["N" /* Less */];
});
__webpack_require__.d(__webpack_exports__, "LessEqual", function () {
return /* reexport */ kernel_names["O" /* LessEqual */];
});
__webpack_require__.d(__webpack_exports__, "LRN", function () {
return /* reexport */ kernel_names["L" /* LRN */];
});
__webpack_require__.d(__webpack_exports__, "LRNBackprop", function () {
return /* reexport */ kernel_names["M" /* LRNBackprop */];
});
__webpack_require__.d(__webpack_exports__, "Max", function () {
return /* reexport */ kernel_names["P" /* Max */];
});
__webpack_require__.d(__webpack_exports__, "Maximum", function () {
return /* reexport */ kernel_names["V" /* Maximum */];
});
__webpack_require__.d(__webpack_exports__, "MaxPool", function () {
return /* reexport */ kernel_names["Q" /* MaxPool */];
});
__webpack_require__.d(__webpack_exports__, "MaxPoolBackprop", function () {
return /* reexport */ kernel_names["T" /* MaxPoolBackprop */];
});
__webpack_require__.d(__webpack_exports__, "MaxPool3D", function () {
return /* reexport */ kernel_names["R" /* MaxPool3D */];
});
__webpack_require__.d(__webpack_exports__, "MaxPool3DBackprop", function () {
return /* reexport */ kernel_names["S" /* MaxPool3DBackprop */];
});
__webpack_require__.d(__webpack_exports__, "MaxPoolWithArgmax", function () {
return /* reexport */ kernel_names["U" /* MaxPoolWithArgmax */];
});
__webpack_require__.d(__webpack_exports__, "Minimum", function () {
return /* reexport */ kernel_names["W" /* Minimum */];
});
__webpack_require__.d(__webpack_exports__, "Mod", function () {
return /* reexport */ kernel_names["X" /* Mod */];
});
__webpack_require__.d(__webpack_exports__, "Multiply", function () {
return /* reexport */ kernel_names["Y" /* Multiply */];
});
__webpack_require__.d(__webpack_exports__, "NotEqual", function () {
return /* reexport */ kernel_names["bb" /* NotEqual */];
});
__webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV3", function () {
return /* reexport */ kernel_names["Z" /* NonMaxSuppressionV3 */];
});
__webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV5", function () {
return /* reexport */ kernel_names["ab" /* NonMaxSuppressionV5 */];
});
__webpack_require__.d(__webpack_exports__, "OneHot", function () {
return /* reexport */ kernel_names["cb" /* OneHot */];
});
__webpack_require__.d(__webpack_exports__, "PadV2", function () {
return /* reexport */ kernel_names["db" /* PadV2 */];
});
__webpack_require__.d(__webpack_exports__, "Pool", function () {
return /* reexport */ kernel_names["eb" /* Pool */];
});
__webpack_require__.d(__webpack_exports__, "Pow", function () {
return /* reexport */ kernel_names["fb" /* Pow */];
});
__webpack_require__.d(__webpack_exports__, "Prelu", function () {
return /* reexport */ kernel_names["gb" /* Prelu */];
});
__webpack_require__.d(__webpack_exports__, "Real", function () {
return /* reexport */ kernel_names["hb" /* Real */];
});
__webpack_require__.d(__webpack_exports__, "Relu", function () {
return /* reexport */ kernel_names["ib" /* Relu */];
});
__webpack_require__.d(__webpack_exports__, "Relu6", function () {
return /* reexport */ kernel_names["jb" /* Relu6 */];
});
__webpack_require__.d(__webpack_exports__, "SelectV2", function () {
return /* reexport */ kernel_names["kb" /* SelectV2 */];
});
__webpack_require__.d(__webpack_exports__, "Selu", function () {
return /* reexport */ kernel_names["lb" /* Selu */];
});
__webpack_require__.d(__webpack_exports__, "SpaceToBatchND", function () {
return /* reexport */ kernel_names["mb" /* SpaceToBatchND */];
});
__webpack_require__.d(__webpack_exports__, "SplitV", function () {
return /* reexport */ kernel_names["nb" /* SplitV */];
});
__webpack_require__.d(__webpack_exports__, "SquaredDifference", function () {
return /* reexport */ kernel_names["pb" /* SquaredDifference */];
});
__webpack_require__.d(__webpack_exports__, "Square", function () {
return /* reexport */ kernel_names["ob" /* Square */];
});
__webpack_require__.d(__webpack_exports__, "Sub", function () {
return /* reexport */ kernel_names["qb" /* Sub */];
});
__webpack_require__.d(__webpack_exports__, "Tile", function () {
return /* reexport */ kernel_names["rb" /* Tile */];
});
__webpack_require__.d(__webpack_exports__, "Transpose", function () {
return /* reexport */ kernel_names["sb" /* Transpose */];
});
__webpack_require__.d(__webpack_exports__, "FromPixels", function () {
return /* reexport */ kernel_names["E" /* FromPixels */];
});
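// The sections that follow assemble "namespace objects": plain objects tagged as
// ES modules via __webpack_require__.r and populated with getters, which back the
// grouped exports registered above (e.g. `io` returns io_namespaceObject and
// `slice_util` returns slice_util_namespaceObject).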
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js
var slice_util_namespaceObject = {};
__webpack_require__.r(slice_util_namespaceObject);
__webpack_require__.d(slice_util_namespaceObject, "assertParamsValid", function () {
return assertParamsValid;
});
__webpack_require__.d(slice_util_namespaceObject, "maskToAxes", function () {
return maskToAxes;
});
__webpack_require__.d(slice_util_namespaceObject, "computeOutShape", function () {
return slice_util_computeOutShape;
});
__webpack_require__.d(slice_util_namespaceObject, "stridesWithElidedDims", function () {
return stridesWithElidedDims;
});
__webpack_require__.d(slice_util_namespaceObject, "startIndicesWithElidedDims", function () {
return startIndicesWithElidedDims;
});
__webpack_require__.d(slice_util_namespaceObject, "stopIndicesWithElidedDims", function () {
return stopIndicesWithElidedDims;
});
__webpack_require__.d(slice_util_namespaceObject, "stridesForAxis", function () {
return stridesForAxis;
});
__webpack_require__.d(slice_util_namespaceObject, "startForAxis", function () {
return startForAxis;
});
__webpack_require__.d(slice_util_namespaceObject, "stopForAxis", function () {
return stopForAxis;
});
__webpack_require__.d(slice_util_namespaceObject, "isSliceContinous", function () {
return isSliceContinous;
});
__webpack_require__.d(slice_util_namespaceObject, "computeFlatOffset", function () {
return computeFlatOffset;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/io/io.js
var io_namespaceObject = {};
__webpack_require__.r(io_namespaceObject);
__webpack_require__.d(io_namespaceObject, "copyModel", function () {
return copyModel;
});
__webpack_require__.d(io_namespaceObject, "listModels", function () {
return listModels;
});
__webpack_require__.d(io_namespaceObject, "moveModel", function () {
return moveModel;
});
__webpack_require__.d(io_namespaceObject, "removeModel", function () {
return removeModel;
});
__webpack_require__.d(io_namespaceObject, "browserFiles", function () {
return browserFiles;
});
__webpack_require__.d(io_namespaceObject, "browserHTTPRequest", function () {
return browserHTTPRequest;
});
__webpack_require__.d(io_namespaceObject, "concatenateArrayBuffers", function () {
return io_utils["d" /* concatenateArrayBuffers */];
});
__webpack_require__.d(io_namespaceObject, "decodeWeights", function () {
return io_utils["e" /* decodeWeights */];
});
__webpack_require__.d(io_namespaceObject, "encodeWeights", function () {
return io_utils["f" /* encodeWeights */];
});
__webpack_require__.d(io_namespaceObject, "fromMemory", function () {
return fromMemory;
});
__webpack_require__.d(io_namespaceObject, "getLoadHandlers", function () {
return getLoadHandlers;
});
__webpack_require__.d(io_namespaceObject, "getModelArtifactsInfoForJSON", function () {
return io_utils["g" /* getModelArtifactsInfoForJSON */];
});
__webpack_require__.d(io_namespaceObject, "getSaveHandlers", function () {
return getSaveHandlers;
});
__webpack_require__.d(io_namespaceObject, "http", function () {
return http;
});
__webpack_require__.d(io_namespaceObject, "isHTTPScheme", function () {
return isHTTPScheme;
});
__webpack_require__.d(io_namespaceObject, "loadWeights", function () {
return loadWeights;
});
__webpack_require__.d(io_namespaceObject, "registerLoadRouter", function () {
return registerLoadRouter;
});
__webpack_require__.d(io_namespaceObject, "registerSaveRouter", function () {
return registerSaveRouter;
});
__webpack_require__.d(io_namespaceObject, "weightsLoaderFactory", function () {
return weightsLoaderFactory;
});
__webpack_require__.d(io_namespaceObject, "withSaveHandler", function () {
return withSaveHandler;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/math.js
var math_namespaceObject = {};
__webpack_require__.r(math_namespaceObject);
__webpack_require__.d(math_namespaceObject, "confusionMatrix", function () {
return confusionMatrix;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/browser.js
var browser_namespaceObject = {};
__webpack_require__.r(browser_namespaceObject);
__webpack_require__.d(browser_namespaceObject, "toPixels", function () {
return toPixels;
});
__webpack_require__.d(browser_namespaceObject, "fromPixels", function () {
return fromPixels;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js
var gather_nd_util_namespaceObject = {};
__webpack_require__.r(gather_nd_util_namespaceObject);
__webpack_require__.d(gather_nd_util_namespaceObject, "prepareAndValidate", function () {
return prepareAndValidate;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js
var scatter_nd_util_namespaceObject = {};
__webpack_require__.r(scatter_nd_util_namespaceObject);
__webpack_require__.d(scatter_nd_util_namespaceObject, "validateUpdateShape", function () {
return validateUpdateShape;
});
__webpack_require__.d(scatter_nd_util_namespaceObject, "validateInput", function () {
return validateInput;
});
__webpack_require__.d(scatter_nd_util_namespaceObject, "calculateShapes", function () {
return calculateShapes;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/serialization.js
var serialization_namespaceObject = {};
__webpack_require__.r(serialization_namespaceObject);
__webpack_require__.d(serialization_namespaceObject, "Serializable", function () {
return Serializable;
});
__webpack_require__.d(serialization_namespaceObject, "SerializationMap", function () {
return SerializationMap;
});
__webpack_require__.d(serialization_namespaceObject, "registerClass", function () {
return registerClass;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/test_util.js
var test_util_namespaceObject = {};
__webpack_require__.r(test_util_namespaceObject);
__webpack_require__.d(test_util_namespaceObject, "TEST_EPSILON_FLOAT16", function () {
return TEST_EPSILON_FLOAT16;
});
__webpack_require__.d(test_util_namespaceObject, "expectArraysClose", function () {
return expectArraysClose;
});
__webpack_require__.d(test_util_namespaceObject, "testEpsilon", function () {
return testEpsilon;
});
__webpack_require__.d(test_util_namespaceObject, "expectPromiseToFail", function () {
return expectPromiseToFail;
});
__webpack_require__.d(test_util_namespaceObject, "expectArraysEqual", function () {
return expectArraysEqual;
});
__webpack_require__.d(test_util_namespaceObject, "expectNumbersClose", function () {
return expectNumbersClose;
});
__webpack_require__.d(test_util_namespaceObject, "expectValuesInRange", function () {
return expectValuesInRange;
});
__webpack_require__.d(test_util_namespaceObject, "expectArrayBuffersEqual", function () {
return expectArrayBuffersEqual;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js
var segment_util_namespaceObject = {};
__webpack_require__.r(segment_util_namespaceObject);
__webpack_require__.d(
segment_util_namespaceObject,
"segOpComputeOptimalWindowSize",
function () {
return segOpComputeOptimalWindowSize;
}
);
__webpack_require__.d(segment_util_namespaceObject, "computeOutShape", function () {
return segment_util_computeOutShape;
});
__webpack_require__.d(segment_util_namespaceObject, "collectGatherOpShapeInfo", function () {
return collectGatherOpShapeInfo;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/spectral_ops.js
var spectral_ops_namespaceObject = {};
__webpack_require__.r(spectral_ops_namespaceObject);
__webpack_require__.d(spectral_ops_namespaceObject, "fft", function () {
return fft;
});
__webpack_require__.d(spectral_ops_namespaceObject, "ifft", function () {
return ifft;
});
__webpack_require__.d(spectral_ops_namespaceObject, "rfft", function () {
return rfft;
});
__webpack_require__.d(spectral_ops_namespaceObject, "irfft", function () {
return irfft;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops.js
var signal_ops_namespaceObject = {};
__webpack_require__.r(signal_ops_namespaceObject);
__webpack_require__.d(signal_ops_namespaceObject, "hannWindow", function () {
return hannWindow;
});
__webpack_require__.d(signal_ops_namespaceObject, "hammingWindow", function () {
return hammingWindow;
});
__webpack_require__.d(signal_ops_namespaceObject, "frame", function () {
return signal_ops_frame;
});
__webpack_require__.d(signal_ops_namespaceObject, "stft", function () {
return stft;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops.js
var loss_ops_namespaceObject = {};
__webpack_require__.r(loss_ops_namespaceObject);
__webpack_require__.d(loss_ops_namespaceObject, "Reduction", function () {
return Reduction;
});
__webpack_require__.d(loss_ops_namespaceObject, "absoluteDifference", function () {
return absoluteDifference;
});
__webpack_require__.d(loss_ops_namespaceObject, "computeWeightedLoss", function () {
return computeWeightedLoss;
});
__webpack_require__.d(loss_ops_namespaceObject, "cosineDistance", function () {
return cosineDistance;
});
__webpack_require__.d(loss_ops_namespaceObject, "hingeLoss", function () {
return hingeLoss;
});
__webpack_require__.d(loss_ops_namespaceObject, "huberLoss", function () {
return huberLoss;
});
__webpack_require__.d(loss_ops_namespaceObject, "logLoss", function () {
return logLoss;
});
__webpack_require__.d(loss_ops_namespaceObject, "meanSquaredError", function () {
return meanSquaredError;
});
__webpack_require__.d(loss_ops_namespaceObject, "sigmoidCrossEntropy", function () {
return sigmoidCrossEntropy;
});
__webpack_require__.d(loss_ops_namespaceObject, "softmaxCrossEntropy", function () {
return softmaxCrossEntropy;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/linalg_ops.js
var linalg_ops_namespaceObject = {};
__webpack_require__.r(linalg_ops_namespaceObject);
__webpack_require__.d(linalg_ops_namespaceObject, "bandPart", function () {
return bandPart;
});
__webpack_require__.d(linalg_ops_namespaceObject, "gramSchmidt", function () {
return gramSchmidt;
});
__webpack_require__.d(linalg_ops_namespaceObject, "qr", function () {
return qr;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/image_ops.js
var image_ops_namespaceObject = {};
__webpack_require__.r(image_ops_namespaceObject);
__webpack_require__.d(image_ops_namespaceObject, "nonMaxSuppression", function () {
return nonMaxSuppression;
});
__webpack_require__.d(image_ops_namespaceObject, "resizeBilinear", function () {
return resizeBilinear;
});
__webpack_require__.d(image_ops_namespaceObject, "resizeNearestNeighbor", function () {
return resizeNearestNeighbor;
});
__webpack_require__.d(image_ops_namespaceObject, "nonMaxSuppressionAsync", function () {
return nonMaxSuppressionAsync;
});
__webpack_require__.d(image_ops_namespaceObject, "nonMaxSuppressionWithScore", function () {
return nonMaxSuppressionWithScore;
});
__webpack_require__.d(
image_ops_namespaceObject,
"nonMaxSuppressionWithScoreAsync",
function () {
return nonMaxSuppressionWithScoreAsync;
}
);
__webpack_require__.d(image_ops_namespaceObject, "cropAndResize", function () {
return cropAndResize;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js
var fused_ops_namespaceObject = {};
__webpack_require__.r(fused_ops_namespaceObject);
__webpack_require__.d(fused_ops_namespaceObject, "matMul", function () {
return fused_ops_matMul;
});
__webpack_require__.d(fused_ops_namespaceObject, "conv2d", function () {
return fused_ops_conv2d;
});
__webpack_require__.d(fused_ops_namespaceObject, "depthwiseConv2d", function () {
return fused_ops_depthwiseConv2d;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/ops.js
var ops_namespaceObject = {};
__webpack_require__.r(ops_namespaceObject);
__webpack_require__.d(ops_namespaceObject, "add", function () {
return add;
});
__webpack_require__.d(ops_namespaceObject, "addN", function () {
return addN;
});
__webpack_require__.d(ops_namespaceObject, "atan2", function () {
return atan2;
});
__webpack_require__.d(ops_namespaceObject, "avgPool", function () {
return avgPool;
});
__webpack_require__.d(ops_namespaceObject, "avgPool3d", function () {
return avgPool3d;
});
__webpack_require__.d(ops_namespaceObject, "batchToSpaceND", function () {
return batchToSpaceND;
});
__webpack_require__.d(ops_namespaceObject, "batchNorm", function () {
return batchNorm;
});
__webpack_require__.d(ops_namespaceObject, "batchNorm2d", function () {
return batchNorm2d;
});
__webpack_require__.d(ops_namespaceObject, "batchNorm3d", function () {
return batchNorm3d;
});
__webpack_require__.d(ops_namespaceObject, "batchNorm4d", function () {
return batchNorm4d;
});
__webpack_require__.d(ops_namespaceObject, "broadcastTo", function () {
return broadcastTo;
});
__webpack_require__.d(ops_namespaceObject, "clone", function () {
return clone;
});
__webpack_require__.d(ops_namespaceObject, "complex", function () {
return complex["a" /* complex */];
});
__webpack_require__.d(ops_namespaceObject, "concat", function () {
return concat;
});
__webpack_require__.d(ops_namespaceObject, "concat1d", function () {
return concat1d;
});
__webpack_require__.d(ops_namespaceObject, "concat2d", function () {
return concat2d;
});
__webpack_require__.d(ops_namespaceObject, "concat3d", function () {
return concat3d;
});
__webpack_require__.d(ops_namespaceObject, "concat4d", function () {
return concat4d;
});
__webpack_require__.d(ops_namespaceObject, "conv1d", function () {
return conv1d;
});
__webpack_require__.d(ops_namespaceObject, "conv2d", function () {
return conv2d;
});
__webpack_require__.d(ops_namespaceObject, "conv2dTranspose", function () {
return conv2dTranspose;
});
__webpack_require__.d(ops_namespaceObject, "conv3d", function () {
return conv3d;
});
__webpack_require__.d(ops_namespaceObject, "conv3dTranspose", function () {
return conv3dTranspose;
});
__webpack_require__.d(ops_namespaceObject, "cumsum", function () {
return cumsum;
});
__webpack_require__.d(ops_namespaceObject, "depthToSpace", function () {
return depthToSpace;
});
__webpack_require__.d(ops_namespaceObject, "depthwiseConv2d", function () {
return depthwiseConv2d;
});
__webpack_require__.d(ops_namespaceObject, "diag", function () {
return diag;
});
__webpack_require__.d(ops_namespaceObject, "div", function () {
return div;
});
__webpack_require__.d(ops_namespaceObject, "divNoNan", function () {
return divNoNan;
});
__webpack_require__.d(ops_namespaceObject, "dot", function () {
return dot;
});
__webpack_require__.d(ops_namespaceObject, "elu", function () {
return elu;
});
__webpack_require__.d(ops_namespaceObject, "equal", function () {
return equal;
});
__webpack_require__.d(ops_namespaceObject, "eye", function () {
return eye;
});
__webpack_require__.d(ops_namespaceObject, "fill", function () {
return fill;
});
__webpack_require__.d(ops_namespaceObject, "floorDiv", function () {
return floorDiv;
});
__webpack_require__.d(ops_namespaceObject, "greater", function () {
return greater;
});
__webpack_require__.d(ops_namespaceObject, "greaterEqual", function () {
return greaterEqual;
});
__webpack_require__.d(ops_namespaceObject, "imag", function () {
return imag["a" /* imag */];
});
__webpack_require__.d(ops_namespaceObject, "leakyRelu", function () {
return leakyRelu;
});
__webpack_require__.d(ops_namespaceObject, "less", function () {
return less;
});
__webpack_require__.d(ops_namespaceObject, "lessEqual", function () {
return lessEqual;
});
__webpack_require__.d(ops_namespaceObject, "localResponseNormalization", function () {
return localResponseNormalization;
});
__webpack_require__.d(ops_namespaceObject, "matMul", function () {
return matMul;
});
__webpack_require__.d(ops_namespaceObject, "max", function () {
return max_max;
});
__webpack_require__.d(ops_namespaceObject, "maxPool", function () {
return maxPool;
});
__webpack_require__.d(ops_namespaceObject, "maxPool3d", function () {
return maxPool3d;
});
__webpack_require__.d(ops_namespaceObject, "maxPoolWithArgmax", function () {
return maxPoolWithArgmax;
});
__webpack_require__.d(ops_namespaceObject, "maximum", function () {
return maximum;
});
__webpack_require__.d(ops_namespaceObject, "minimum", function () {
return minimum;
});
__webpack_require__.d(ops_namespaceObject, "mod", function () {
return mod;
});
__webpack_require__.d(ops_namespaceObject, "mul", function () {
return mul;
});
__webpack_require__.d(ops_namespaceObject, "multinomial", function () {
return multinomial;
});
__webpack_require__.d(ops_namespaceObject, "notEqual", function () {
return notEqual;
});
__webpack_require__.d(ops_namespaceObject, "oneHot", function () {
return oneHot;
});
__webpack_require__.d(ops_namespaceObject, "outerProduct", function () {
return outerProduct;
});
__webpack_require__.d(ops_namespaceObject, "pad", function () {
return pad_pad;
});
__webpack_require__.d(ops_namespaceObject, "pad1d", function () {
return pad1d;
});
__webpack_require__.d(ops_namespaceObject, "pad2d", function () {
return pad2d;
});
__webpack_require__.d(ops_namespaceObject, "pad3d", function () {
return pad3d;
});
__webpack_require__.d(ops_namespaceObject, "pad4d", function () {
return pad4d;
});
__webpack_require__.d(ops_namespaceObject, "pool", function () {
return pool;
});
__webpack_require__.d(ops_namespaceObject, "pow", function () {
return pow;
});
__webpack_require__.d(ops_namespaceObject, "prelu", function () {
return prelu;
});
__webpack_require__.d(ops_namespaceObject, "rand", function () {
return rand;
});
__webpack_require__.d(ops_namespaceObject, "randomGamma", function () {
return randomGamma;
});
__webpack_require__.d(ops_namespaceObject, "randomNormal", function () {
return randomNormal;
});
__webpack_require__.d(ops_namespaceObject, "randomUniform", function () {
return randomUniform;
});
__webpack_require__.d(ops_namespaceObject, "real", function () {
return real["a" /* real */];
});
__webpack_require__.d(ops_namespaceObject, "relu", function () {
return relu;
});
__webpack_require__.d(ops_namespaceObject, "relu6", function () {
return relu6;
});
__webpack_require__.d(ops_namespaceObject, "selu", function () {
return selu;
});
__webpack_require__.d(ops_namespaceObject, "separableConv2d", function () {
return separableConv2d;
});
__webpack_require__.d(ops_namespaceObject, "spaceToBatchND", function () {
return spaceToBatchND;
});
__webpack_require__.d(ops_namespaceObject, "split", function () {
return split;
});
__webpack_require__.d(ops_namespaceObject, "square", function () {
return square;
});
__webpack_require__.d(ops_namespaceObject, "squaredDifference", function () {
return squaredDifference;
});
__webpack_require__.d(ops_namespaceObject, "sub", function () {
return sub;
});
__webpack_require__.d(ops_namespaceObject, "tile", function () {
return tile;
});
__webpack_require__.d(ops_namespaceObject, "truncatedNormal", function () {
return truncatedNormal;
});
__webpack_require__.d(ops_namespaceObject, "booleanMaskAsync", function () {
return booleanMaskAsync;
});
__webpack_require__.d(ops_namespaceObject, "reverse", function () {
return reverse_reverse;
});
__webpack_require__.d(ops_namespaceObject, "reverse1d", function () {
return reverse1d;
});
__webpack_require__.d(ops_namespaceObject, "reverse2d", function () {
return reverse2d;
});
__webpack_require__.d(ops_namespaceObject, "reverse3d", function () {
return reverse3d;
});
__webpack_require__.d(ops_namespaceObject, "reverse4d", function () {
return reverse4d;
});
__webpack_require__.d(ops_namespaceObject, "slice", function () {
return slice;
});
__webpack_require__.d(ops_namespaceObject, "slice1d", function () {
return slice1d;
});
__webpack_require__.d(ops_namespaceObject, "slice2d", function () {
return slice2d;
});
__webpack_require__.d(ops_namespaceObject, "slice3d", function () {
return slice3d;
});
__webpack_require__.d(ops_namespaceObject, "slice4d", function () {
return slice4d;
});
__webpack_require__.d(ops_namespaceObject, "abs", function () {
return abs;
});
__webpack_require__.d(ops_namespaceObject, "acos", function () {
return acos;
});
__webpack_require__.d(ops_namespaceObject, "acosh", function () {
return acosh;
});
__webpack_require__.d(ops_namespaceObject, "asin", function () {
return asin;
});
__webpack_require__.d(ops_namespaceObject, "asinh", function () {
return asinh;
});
__webpack_require__.d(ops_namespaceObject, "atan", function () {
return atan;
});
__webpack_require__.d(ops_namespaceObject, "atanh", function () {
return atanh;
});
__webpack_require__.d(ops_namespaceObject, "ceil", function () {
return ceil;
});
__webpack_require__.d(ops_namespaceObject, "clipByValue", function () {
return clipByValue;
});
__webpack_require__.d(ops_namespaceObject, "cos", function () {
return cos;
});
__webpack_require__.d(ops_namespaceObject, "cosh", function () {
return cosh;
});
__webpack_require__.d(ops_namespaceObject, "erf", function () {
return erf;
});
__webpack_require__.d(ops_namespaceObject, "exp", function () {
return unary_ops_exp;
});
__webpack_require__.d(ops_namespaceObject, "expm1", function () {
return expm1;
});
__webpack_require__.d(ops_namespaceObject, "floor", function () {
return floor;
});
__webpack_require__.d(ops_namespaceObject, "log", function () {
return log;
});
__webpack_require__.d(ops_namespaceObject, "log1p", function () {
return log1p;
});
__webpack_require__.d(ops_namespaceObject, "logSigmoid", function () {
return logSigmoid;
});
__webpack_require__.d(ops_namespaceObject, "neg", function () {
return neg;
});
__webpack_require__.d(ops_namespaceObject, "reciprocal", function () {
return reciprocal;
});
__webpack_require__.d(ops_namespaceObject, "round", function () {
return round;
});
__webpack_require__.d(ops_namespaceObject, "rsqrt", function () {
return rsqrt;
});
__webpack_require__.d(ops_namespaceObject, "sigmoid", function () {
return sigmoid;
});
__webpack_require__.d(ops_namespaceObject, "sign", function () {
return sign;
});
__webpack_require__.d(ops_namespaceObject, "isNaN", function () {
return unary_ops_isNaN;
});
__webpack_require__.d(ops_namespaceObject, "isInf", function () {
return isInf;
});
__webpack_require__.d(ops_namespaceObject, "isFinite", function () {
return unary_ops_isFinite;
});
__webpack_require__.d(ops_namespaceObject, "sin", function () {
return sin;
});
__webpack_require__.d(ops_namespaceObject, "sinh", function () {
return sinh;
});
__webpack_require__.d(ops_namespaceObject, "softplus", function () {
return softplus;
});
__webpack_require__.d(ops_namespaceObject, "sqrt", function () {
return sqrt;
});
__webpack_require__.d(ops_namespaceObject, "step", function () {
return unary_ops_step;
});
__webpack_require__.d(ops_namespaceObject, "tan", function () {
return tan;
});
__webpack_require__.d(ops_namespaceObject, "tanh", function () {
return tanh;
});
__webpack_require__.d(ops_namespaceObject, "all", function () {
return reduction_ops_all;
});
__webpack_require__.d(ops_namespaceObject, "any", function () {
return any;
});
__webpack_require__.d(ops_namespaceObject, "argMax", function () {
return argMax;
});
__webpack_require__.d(ops_namespaceObject, "argMin", function () {
return argMin;
});
__webpack_require__.d(ops_namespaceObject, "logSumExp", function () {
return logSumExp;
});
__webpack_require__.d(ops_namespaceObject, "mean", function () {
return reduction_ops_mean;
});
__webpack_require__.d(ops_namespaceObject, "min", function () {
return reduction_ops_min;
});
__webpack_require__.d(ops_namespaceObject, "moments", function () {
return moments;
});
__webpack_require__.d(ops_namespaceObject, "sum", function () {
return sum;
});
__webpack_require__.d(ops_namespaceObject, "prod", function () {
return reduction_ops_prod;
});
__webpack_require__.d(ops_namespaceObject, "equalStrict", function () {
return equalStrict;
});
__webpack_require__.d(ops_namespaceObject, "greaterEqualStrict", function () {
return greaterEqualStrict;
});
__webpack_require__.d(ops_namespaceObject, "greaterStrict", function () {
return greaterStrict;
});
__webpack_require__.d(ops_namespaceObject, "lessEqualStrict", function () {
return lessEqualStrict;
});
__webpack_require__.d(ops_namespaceObject, "lessStrict", function () {
return lessStrict;
});
__webpack_require__.d(ops_namespaceObject, "notEqualStrict", function () {
return notEqualStrict;
});
__webpack_require__.d(ops_namespaceObject, "addStrict", function () {
return addStrict;
});
__webpack_require__.d(ops_namespaceObject, "divStrict", function () {
return divStrict;
});
__webpack_require__.d(ops_namespaceObject, "maximumStrict", function () {
return maximumStrict;
});
__webpack_require__.d(ops_namespaceObject, "minimumStrict", function () {
return minimumStrict;
});
__webpack_require__.d(ops_namespaceObject, "modStrict", function () {
return modStrict;
});
__webpack_require__.d(ops_namespaceObject, "mulStrict", function () {
return mulStrict;
});
__webpack_require__.d(ops_namespaceObject, "powStrict", function () {
return powStrict;
});
__webpack_require__.d(ops_namespaceObject, "squaredDifferenceStrict", function () {
return squaredDifferenceStrict;
});
__webpack_require__.d(ops_namespaceObject, "subStrict", function () {
return subStrict;
});
__webpack_require__.d(ops_namespaceObject, "logicalAnd", function () {
return logicalAnd;
});
__webpack_require__.d(ops_namespaceObject, "logicalNot", function () {
return logicalNot;
});
__webpack_require__.d(ops_namespaceObject, "logicalOr", function () {
return logicalOr;
});
__webpack_require__.d(ops_namespaceObject, "logicalXor", function () {
return logicalXor;
});
__webpack_require__.d(ops_namespaceObject, "where", function () {
return where;
});
__webpack_require__.d(ops_namespaceObject, "whereAsync", function () {
return whereAsync;
});
__webpack_require__.d(ops_namespaceObject, "buffer", function () {
return array_ops_buffer;
});
__webpack_require__.d(ops_namespaceObject, "print", function () {
return print;
});
__webpack_require__.d(ops_namespaceObject, "cast", function () {
return cast;
});
__webpack_require__.d(ops_namespaceObject, "expandDims", function () {
return expandDims;
});
__webpack_require__.d(ops_namespaceObject, "reshape", function () {
return reshape;
});
__webpack_require__.d(ops_namespaceObject, "squeeze", function () {
return squeeze;
});
__webpack_require__.d(ops_namespaceObject, "stack", function () {
return stack;
});
__webpack_require__.d(ops_namespaceObject, "unstack", function () {
return unstack;
});
__webpack_require__.d(ops_namespaceObject, "setdiff1dAsync", function () {
return setdiff1dAsync;
});
__webpack_require__.d(ops_namespaceObject, "linspace", function () {
return tensor_ops["a" /* linspace */];
});
__webpack_require__.d(ops_namespaceObject, "ones", function () {
return tensor_ops["b" /* ones */];
});
__webpack_require__.d(ops_namespaceObject, "range", function () {
return tensor_ops["d" /* range */];
});
__webpack_require__.d(ops_namespaceObject, "scalar", function () {
return tensor_ops["e" /* scalar */];
});
__webpack_require__.d(ops_namespaceObject, "tensor", function () {
return tensor_ops["f" /* tensor */];
});
__webpack_require__.d(ops_namespaceObject, "tensor1d", function () {
return tensor_ops["g" /* tensor1d */];
});
__webpack_require__.d(ops_namespaceObject, "tensor2d", function () {
return tensor_ops["h" /* tensor2d */];
});
__webpack_require__.d(ops_namespaceObject, "tensor3d", function () {
return tensor_ops["i" /* tensor3d */];
});
__webpack_require__.d(ops_namespaceObject, "tensor4d", function () {
return tensor_ops["j" /* tensor4d */];
});
__webpack_require__.d(ops_namespaceObject, "tensor5d", function () {
return tensor_ops["k" /* tensor5d */];
});
__webpack_require__.d(ops_namespaceObject, "tensor6d", function () {
return tensor_ops["l" /* tensor6d */];
});
__webpack_require__.d(ops_namespaceObject, "variable", function () {
return tensor_ops["m" /* variable */];
});
__webpack_require__.d(ops_namespaceObject, "zeros", function () {
return tensor_ops["n" /* zeros */];
});
__webpack_require__.d(ops_namespaceObject, "onesLike", function () {
return tensor_ops["c" /* onesLike */];
});
__webpack_require__.d(ops_namespaceObject, "zerosLike", function () {
return tensor_ops["o" /* zerosLike */];
});
__webpack_require__.d(ops_namespaceObject, "transpose", function () {
return transpose;
});
__webpack_require__.d(ops_namespaceObject, "softmax", function () {
return softmax;
});
__webpack_require__.d(ops_namespaceObject, "logSoftmax", function () {
return logSoftmax;
});
__webpack_require__.d(ops_namespaceObject, "norm", function () {
return norm_norm;
});
__webpack_require__.d(ops_namespaceObject, "gather", function () {
return gather;
});
__webpack_require__.d(ops_namespaceObject, "unsortedSegmentSum", function () {
return unsortedSegmentSum;
});
__webpack_require__.d(ops_namespaceObject, "basicLSTMCell", function () {
return basicLSTMCell;
});
__webpack_require__.d(ops_namespaceObject, "multiRNNCell", function () {
return multiRNNCell;
});
__webpack_require__.d(ops_namespaceObject, "movingAverage", function () {
return movingAverage;
});
__webpack_require__.d(ops_namespaceObject, "stridedSlice", function () {
return stridedSlice;
});
__webpack_require__.d(ops_namespaceObject, "topk", function () {
return topk;
});
__webpack_require__.d(ops_namespaceObject, "scatterND", function () {
return scatterND;
});
__webpack_require__.d(ops_namespaceObject, "fft", function () {
return fft;
});
__webpack_require__.d(ops_namespaceObject, "ifft", function () {
return ifft;
});
__webpack_require__.d(ops_namespaceObject, "rfft", function () {
return rfft;
});
__webpack_require__.d(ops_namespaceObject, "irfft", function () {
return irfft;
});
__webpack_require__.d(ops_namespaceObject, "sparseToDense", function () {
return sparseToDense;
});
__webpack_require__.d(ops_namespaceObject, "gatherND", function () {
return gatherND;
});
__webpack_require__.d(ops_namespaceObject, "dropout", function () {
return dropout;
});
__webpack_require__.d(ops_namespaceObject, "hannWindow", function () {
return hannWindow;
});
__webpack_require__.d(ops_namespaceObject, "hammingWindow", function () {
return hammingWindow;
});
__webpack_require__.d(ops_namespaceObject, "frame", function () {
return signal_ops_frame;
});
__webpack_require__.d(ops_namespaceObject, "stft", function () {
return stft;
});
__webpack_require__.d(ops_namespaceObject, "inTopKAsync", function () {
return inTopKAsync;
});
__webpack_require__.d(ops_namespaceObject, "op", function () {
return operation["a" /* op */];
});
__webpack_require__.d(ops_namespaceObject, "image", function () {
return image_ops_namespaceObject;
});
__webpack_require__.d(ops_namespaceObject, "linalg", function () {
return linalg_ops_namespaceObject;
});
__webpack_require__.d(ops_namespaceObject, "losses", function () {
return loss_ops_namespaceObject;
});
__webpack_require__.d(ops_namespaceObject, "spectral", function () {
return spectral_ops_namespaceObject;
});
__webpack_require__.d(ops_namespaceObject, "fused", function () {
return fused_ops_namespaceObject;
});
__webpack_require__.d(ops_namespaceObject, "signal", function () {
return signal_ops_namespaceObject;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js
var backend_util_namespaceObject = {};
__webpack_require__.r(backend_util_namespaceObject);
__webpack_require__.d(backend_util_namespaceObject, "axesAreInnerMostDims", function () {
return axesAreInnerMostDims;
});
__webpack_require__.d(backend_util_namespaceObject, "combineLocations", function () {
return combineLocations;
});
__webpack_require__.d(backend_util_namespaceObject, "computeOutAndReduceShapes", function () {
return computeOutAndReduceShapes;
});
__webpack_require__.d(backend_util_namespaceObject, "expandShapeToKeepDim", function () {
return expandShapeToKeepDim;
});
__webpack_require__.d(
backend_util_namespaceObject,
"assertAxesAreInnerMostDims",
function () {
return assertAxesAreInnerMostDims;
}
);
__webpack_require__.d(backend_util_namespaceObject, "getAxesPermutation", function () {
return getAxesPermutation;
});
__webpack_require__.d(backend_util_namespaceObject, "getUndoAxesPermutation", function () {
return getUndoAxesPermutation;
});
__webpack_require__.d(backend_util_namespaceObject, "getInnerMostAxes", function () {
return getInnerMostAxes;
});
__webpack_require__.d(backend_util_namespaceObject, "getBroadcastDims", function () {
return getBroadcastDims;
});
__webpack_require__.d(backend_util_namespaceObject, "getReductionAxes", function () {
return getReductionAxes;
});
__webpack_require__.d(
backend_util_namespaceObject,
"assertAndGetBroadcastShape",
function () {
return assertAndGetBroadcastShape;
}
);
__webpack_require__.d(backend_util_namespaceObject, "assertParamsConsistent", function () {
return assertParamsConsistent;
});
__webpack_require__.d(backend_util_namespaceObject, "computeOutShape", function () {
return computeOutShape;
});
__webpack_require__.d(backend_util_namespaceObject, "computePool2DInfo", function () {
return computePool2DInfo;
});
__webpack_require__.d(backend_util_namespaceObject, "computePool3DInfo", function () {
return computePool3DInfo;
});
__webpack_require__.d(backend_util_namespaceObject, "computeConv2DInfo", function () {
return computeConv2DInfo;
});
__webpack_require__.d(backend_util_namespaceObject, "computeConv3DInfo", function () {
return computeConv3DInfo;
});
__webpack_require__.d(backend_util_namespaceObject, "computeDefaultPad", function () {
return computeDefaultPad;
});
__webpack_require__.d(backend_util_namespaceObject, "tupleValuesAreOne", function () {
return tupleValuesAreOne;
});
__webpack_require__.d(
backend_util_namespaceObject,
"eitherStridesOrDilationsAreOne",
function () {
return eitherStridesOrDilationsAreOne;
}
);
__webpack_require__.d(backend_util_namespaceObject, "convertConv2DDataFormat", function () {
return convertConv2DDataFormat;
});
__webpack_require__.d(backend_util_namespaceObject, "PARALLELIZE_THRESHOLD", function () {
return PARALLELIZE_THRESHOLD;
});
__webpack_require__.d(backend_util_namespaceObject, "computeOptimalWindowSize", function () {
return computeOptimalWindowSize;
});
__webpack_require__.d(backend_util_namespaceObject, "nonMaxSuppressionV3", function () {
return nonMaxSuppressionV3;
});
__webpack_require__.d(backend_util_namespaceObject, "nonMaxSuppressionV5", function () {
return nonMaxSuppressionV5;
});
__webpack_require__.d(backend_util_namespaceObject, "upcastType", function () {
return dist_types["c" /* upcastType */];
});
__webpack_require__.d(backend_util_namespaceObject, "getReshaped", function () {
return getReshaped;
});
__webpack_require__.d(backend_util_namespaceObject, "getPermuted", function () {
return getPermuted;
});
__webpack_require__.d(backend_util_namespaceObject, "getReshapedPermuted", function () {
return getReshapedPermuted;
});
__webpack_require__.d(backend_util_namespaceObject, "getSliceBeginCoords", function () {
return getSliceBeginCoords;
});
__webpack_require__.d(backend_util_namespaceObject, "getSliceSize", function () {
return getSliceSize;
});
__webpack_require__.d(backend_util_namespaceObject, "prepareAndValidate", function () {
return prepareAndValidate;
});
__webpack_require__.d(backend_util_namespaceObject, "validateUpdateShape", function () {
return validateUpdateShape;
});
__webpack_require__.d(backend_util_namespaceObject, "validateInput", function () {
return validateInput;
});
__webpack_require__.d(backend_util_namespaceObject, "calculateShapes", function () {
return calculateShapes;
});
__webpack_require__.d(backend_util_namespaceObject, "SELU_SCALEALPHA", function () {
return SELU_SCALEALPHA;
});
__webpack_require__.d(backend_util_namespaceObject, "SELU_SCALE", function () {
return SELU_SCALE;
});
__webpack_require__.d(backend_util_namespaceObject, "shouldFuse", function () {
return shouldFuse;
});
__webpack_require__.d(backend_util_namespaceObject, "ERF_P", function () {
return ERF_P;
});
__webpack_require__.d(backend_util_namespaceObject, "ERF_A1", function () {
return ERF_A1;
});
__webpack_require__.d(backend_util_namespaceObject, "ERF_A2", function () {
return ERF_A2;
});
__webpack_require__.d(backend_util_namespaceObject, "ERF_A3", function () {
return ERF_A3;
});
__webpack_require__.d(backend_util_namespaceObject, "ERF_A4", function () {
return ERF_A4;
});
__webpack_require__.d(backend_util_namespaceObject, "ERF_A5", function () {
return ERF_A5;
});
__webpack_require__.d(backend_util_namespaceObject, "warn", function () {
return warn;
});
__webpack_require__.d(backend_util_namespaceObject, "log", function () {
return log_log;
});
__webpack_require__.d(backend_util_namespaceObject, "mergeRealAndImagArrays", function () {
return mergeRealAndImagArrays;
});
__webpack_require__.d(backend_util_namespaceObject, "splitRealAndImagArrays", function () {
return splitRealAndImagArrays;
});
__webpack_require__.d(backend_util_namespaceObject, "complexWithEvenIndex", function () {
return complexWithEvenIndex;
});
__webpack_require__.d(backend_util_namespaceObject, "complexWithOddIndex", function () {
return complexWithOddIndex;
});
__webpack_require__.d(backend_util_namespaceObject, "getComplexWithIndex", function () {
return getComplexWithIndex;
});
__webpack_require__.d(backend_util_namespaceObject, "assignToTypedArray", function () {
return assignToTypedArray;
});
__webpack_require__.d(backend_util_namespaceObject, "exponents", function () {
return exponents;
});
__webpack_require__.d(backend_util_namespaceObject, "exponent", function () {
return exponent;
});
__webpack_require__.d(backend_util_namespaceObject, "segment_util", function () {
return segment_util_namespaceObject;
});
__webpack_require__.d(backend_util_namespaceObject, "castTensor", function () {
return castTensor;
});
__webpack_require__.d(backend_util_namespaceObject, "reshapeTensor", function () {
return reshapeTensor;
});
__webpack_require__.d(backend_util_namespaceObject, "linspaceImpl", function () {
return linspaceImpl;
});
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js
var kernel_impls_namespaceObject = {};
__webpack_require__.r(kernel_impls_namespaceObject);
__webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV3", function () {
return nonMaxSuppressionV3;
});
__webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV5", function () {
return nonMaxSuppressionV5;
});
__webpack_require__.d(kernel_impls_namespaceObject, "split", function () {
return split_shared_split;
});
__webpack_require__.d(kernel_impls_namespaceObject, "tile", function () {
return tile_impl_tile;
});
__webpack_require__.d(kernel_impls_namespaceObject, "topkImpl", function () {
return topkImpl;
});
__webpack_require__.d(kernel_impls_namespaceObject, "whereImpl", function () {
return whereImpl;
});
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/engine.js + 2 modules
var engine = __webpack_require__(5);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/flags.js
var flags = __webpack_require__(61);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_names.js
var kernel_names = __webpack_require__(6);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js
/**
* @license
* Copyright 2017 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the dimensions in the input shape that are broadcasted to
* produce the provided output shape.
*
* The returned dimensions are 0-indexed and sorted. An example:
* inShape = [4, 1, 3]
* outShape = [5, 4, 3, 3]
* result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.
*/
function getBroadcastDims(inShape, outShape) {
const inRank = inShape.length;
const dims = [];
for (let i = 0; i < inRank; i++) {
const dim = inRank - 1 - i;
const a = inShape[dim] || 1;
const b = outShape[outShape.length - 1 - i] || 1;
if (b > 1 && a === 1) {
dims.unshift(dim);
}
}
return dims;
}
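// For instance, getBroadcastDims([4, 1, 3], [5, 4, 3, 3]) returns [1] (the
// size-1 middle dim of the input is broadcast up to 3), and
// getBroadcastDims([2, 1], [2, 3]) returns [1]; dims that already match, or
// that are missing entirely from the input shape, are not reported.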
/**
* Returns the axes in the output space that should be reduced to produce
* the input space.
*/
function getReductionAxes(inShape, outShape) {
const result = [];
for (let i = 0; i < outShape.length; i++) {
const inDim = inShape[inShape.length - i - 1];
const outAxis = outShape.length - i - 1;
const outDim = outShape[outAxis];
if (inDim == null || (inDim === 1 && outDim > 1)) {
result.unshift(outAxis);
}
}
return result;
}
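// For instance, getReductionAxes([1, 3], [2, 3]) returns [0], and
// getReductionAxes([3], [2, 3]) also returns [0]: both size-1 dims and dims
// missing from the input map to output axes that must be summed away when
// back-propagating through a broadcast.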
function assertAndGetBroadcastShape(shapeA, shapeB) {
const result = [];
const l = Math.max(shapeA.length, shapeB.length);
for (let i = 0; i < l; i++) {
let a = shapeA[shapeA.length - i - 1];
if (a == null) {
a = 1;
}
let b = shapeB[shapeB.length - i - 1];
if (b == null) {
b = 1;
}
if (a === 1) {
result.unshift(b);
} else if (b === 1) {
result.unshift(a);
} else if (a !== b) {
const errMsg =
`Operands could not be broadcast together with shapes ` + `${shapeA} and ${shapeB}.`;
throw Error(errMsg);
} else {
result.unshift(a);
}
}
return result;
}
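// For instance, assertAndGetBroadcastShape([8, 1, 6, 1], [7, 1, 5]) aligns the
// shapes from the right and keeps the non-1 size at each position, giving
// [8, 7, 6, 5]; assertAndGetBroadcastShape([2, 3], [4, 3]) throws because
// 2 and 4 are incompatible.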
//# sourceMappingURL=broadcast_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Add_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const addGradConfig = {
kernelName: kernel_names["a" /* Add */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
let res = dy;
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
res = res.sum(reduceAxes);
}
return res.reshape(a.shape);
};
const derB = () => {
let res = dy;
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
res = res.sum(reduceAxes);
}
return res.reshape(b.shape);
};
return { a: derA, b: derB };
},
};
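// Concretely, with a.shape = [1, 3] and b.shape = [2, 3] the broadcast output
// shape is [2, 3]; getReductionAxes([1, 3], [2, 3]) is [0], so derA sums dy
// over axis 0 and reshapes the result back to [1, 3], while derB has no axes
// to reduce and returns dy with shape [2, 3] unchanged.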
//# sourceMappingURL=Add_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/AddN_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const addNGradConfig = {
kernelName: kernel_names["b" /* AddN */],
saveAllInputs: true,
gradFunc: (dy, saved) => {
const ders = {};
saved.forEach((_, i) => {
ders[i] = () => dy.clone();
});
return ders;
},
};
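// addN computes an elementwise sum of same-shaped tensors, so the partial
// derivative with respect to every input is the identity: each input simply
// receives a clone of dy.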
//# sourceMappingURL=AddN_grad.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util.js
var tensor_util = __webpack_require__(11);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js
var tensor_util_env = __webpack_require__(3);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/operation.js
var operation = __webpack_require__(4);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/add.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
*
* ```js
* // Broadcast add a with b.
* const a = tf.scalar(5);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
* @param a The first `tf.Tensor` to add.
* @param b The second `tf.Tensor` to add. Must have the same type as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
function add_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "add");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "add");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
const forward = (backend, save) => {
const res = backend.add($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["a" /* Add */]
);
}
const add = Object(operation["a" /* op */])({ add_ });
//# sourceMappingURL=add.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor.js + 1 modules
var dist_tensor = __webpack_require__(7);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util.js
var util = __webpack_require__(1);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js
/**
* @license
* Copyright 2017 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function assertParamsConsistent(shapes, axis) {
const rank = shapes[0].length;
shapes.forEach((shape, i) => {
util["assert"](
shape.length === rank,
() =>
`Error in concat${rank}D: rank of tensors[${i}] must be the same ` +
`as the rank of the rest (${rank})`
);
});
util["assert"](
axis >= 0 && axis < rank,
() => `Error in concat${rank}D: axis must be between 0 and ${rank - 1}.`
);
const firstShape = shapes[0];
shapes.forEach((shape, i) => {
for (let r = 0; r < rank; r++) {
util["assert"](
r === axis || shape[r] === firstShape[r],
() =>
`Error in concat${rank}D: Shape of tensors[${i}] (${shape}) ` +
`does not match the shape of the rest (${firstShape}) ` +
          `along the non-concatenated axis ${r}.`
);
}
});
}
function computeOutShape(shapes, axis) {
const outputShape = shapes[0].slice();
for (let i = 1; i < shapes.length; i++) {
outputShape[axis] += shapes[i][axis];
}
return outputShape;
}
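// For instance, computeOutShape([[2, 3], [2, 4]], 1) returns [2, 7]: the
// output copies the first shape and sums the sizes along the concat axis.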
//# sourceMappingURL=concat_util.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops.js
var tensor_ops = __webpack_require__(8);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Concatenates a list of `tf.Tensor`s along a given axis.
*
* The tensors ranks and types must match, and their sizes must match in all
* dimensions except `axis`.
*
* Also available are stricter rank-specific methods that assert that
* `tensors` are of the given rank:
* - `tf.concat1d`
* - `tf.concat2d`
* - `tf.concat3d`
* - `tf.concat4d`
*
 * Except for `tf.concat1d` (which does not have an axis param), all of these
 * methods have the same signature as this one.
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
 * a.concat(b).print(); // or tf.concat([a, b])
* ```
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* const c = tf.tensor1d([5, 6]);
* tf.concat([a, b, c]).print();
* ```
*
* ```js
* const a = tf.tensor2d([[1, 2], [10, 20]]);
* const b = tf.tensor2d([[3, 4], [30, 40]]);
* const axis = 1;
* tf.concat([a, b], axis).print();
* ```
* @param tensors A list of tensors to concatenate.
 * @param axis The axis to concatenate along. Defaults to 0 (the first dim).
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
function concat_(tensors, axis = 0) {
Object(util["assert"])(tensors.length >= 1, () => "Pass at least one tensor to concat");
let $tensors = Object(tensor_util_env["b" /* convertToTensorArray */])(
tensors,
"tensors",
"concat"
);
if ($tensors[0].dtype === "complex64") {
$tensors.forEach((tensor) => {
if (tensor.dtype !== "complex64") {
throw new Error(`Cannot concatenate complex64 tensors with a tensor
with dtype ${tensor.dtype}. `);
}
});
}
const $axis = Object(util["parseAxisParam"])(axis, $tensors[0].shape)[0];
const outShape = computeOutShape(
$tensors.map((t) => t.shape),
$axis
);
if (Object(util["sizeFromShape"])(outShape) === 0) {
return Object(tensor_ops["f" /* tensor */])([], outShape);
}
// Keep only non-empty tensors (ignore tensors with 0 in their shape).
$tensors = $tensors.filter((t) => t.size > 0);
if ($tensors.length === 1) {
return $tensors[0];
}
const shapes = $tensors.map((t) => t.shape);
assertParamsConsistent(shapes, $axis);
const forward = (backend, save) => {
const res = backend.concat($tensors, $axis);
save($tensors);
return res;
};
const inputs = $tensors;
const attr = { axis };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["l" /* Concat */],
attr
);
}
const concat = Object(operation["a" /* op */])({ concat_ });
//# sourceMappingURL=concat.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/array_ops.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Reshapes a `tf.Tensor` to a given shape.
*
* Given an input tensor, returns a new tensor with the same values as the
* input tensor with shape `shape`.
*
* If one component of shape is the special value -1, the size of that
* dimension is computed so that the total size remains constant. In
* particular, a shape of [-1] flattens into 1-D. At most one component of
* shape can be -1.
*
* If shape is 1-D or higher, then the operation returns a tensor with shape
* shape filled with the values of tensor. In this case, the number of
* elements implied by shape must be the same as the number of elements in
* tensor.
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* x.reshape([2, 2]).print();
* ```
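 *
 * ```js
 * // One component of shape may be -1; it is inferred from the total size:
 * const y = tf.tensor1d([1, 2, 3, 4, 5, 6]);
 * y.reshape([-1, 2]).print(); // Tensor of shape [3, 2]
 * ```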
*
* @param x The input tensor to be reshaped.
* @param shape An array of integers defining the output tensor shape.
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function reshape_(x, shape) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "reshape", null);
shape = util["inferFromImplicitShape"](shape, $x.size);
util["assert"](
$x.size === util["sizeFromShape"](shape),
() => "new shape and old shape must have the same number of elements."
);
const grad = (dy) => {
return { x: () => dy.reshape($x.shape) };
};
const attrs = { shape };
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.reshape($x, shape),
{ x: $x },
grad,
"Reshape",
attrs
);
}
/**
* Removes dimensions of size 1 from the shape of a `tf.Tensor`.
*
* ```js
* const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);
* x.squeeze().print();
* ```
*
* @param x The input tensor to be squeezed.
* @param axis An optional list of numbers. If specified, only
* squeezes the dimensions listed. The dimension index starts at 0. It
* is an error to squeeze a dimension that is not 1.
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function squeeze_(x, axis) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "squeeze");
return reshape($x, util["squeezeShape"]($x.shape, axis).newShape);
}
/**
* Casts a `tf.Tensor` to a new dtype.
*
* ```js
* const x = tf.tensor1d([1.5, 2.5, 3]);
* tf.cast(x, 'int32').print();
* ```
 * @param x The input tensor to be cast.
* @param dtype The dtype to cast the input tensor to.
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function cast_(x, dtype) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cast");
// Sanity checks.
if (!util["isValidDtype"](dtype)) {
throw new Error(`Failed to cast to unknown dtype ${dtype}`);
}
if (
(dtype === "string" && $x.dtype !== "string") ||
(dtype !== "string" && $x.dtype === "string")
) {
throw new Error("Only strings can be casted to strings");
}
const grad = (dy) => {
return { x: () => dy.clone() };
};
const attrs = { dtype };
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.cast($x, dtype),
{ x: $x },
grad,
"Cast",
attrs
);
}
/**
* Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`.
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* const c = tf.tensor1d([5, 6]);
* tf.stack([a, b, c]).print();
* ```
*
* @param tensors A list of tensor objects with the same shape and dtype.
* @param axis The axis to stack along. Defaults to 0 (the first dim).
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
function stack_(tensors, axis = 0) {
const $tensors = Object(tensor_util_env["b" /* convertToTensorArray */])(
tensors,
"tensors",
"stack"
);
util["assert"]($tensors.length >= 1, () => "Pass at least one tensor to tf.stack");
if ($tensors.length === 1) {
return $tensors[0].expandDims(axis);
}
const rank = $tensors[0].rank;
const shape = $tensors[0].shape;
const dtype = $tensors[0].dtype;
util["assert"](axis <= rank, () => "Axis must be <= rank of the tensor");
$tensors.forEach((t) => {
util["assertShapesMatch"](
shape,
t.shape,
"All tensors passed to stack must have matching shapes"
);
});
$tensors.forEach((t) => {
util["assert"](
dtype === t.dtype,
() => "All tensors passed to stack must have matching dtypes"
);
});
const expandedTensors = $tensors.map((t) => t.expandDims(axis));
return concat(expandedTensors, axis);
}
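// Note that stack is realized as expandDims(axis) on each input followed by a
// concat along that axis: stacking three tensors of shape [2] along axis 1
// first expands each to [2, 1], then concatenates them into shape [2, 3].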
/**
* Unstacks a `tf.Tensor` of rank-`R` into a list of rank-`(R-1)` `tf.Tensor`s.
*
* ```js
* const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* tf.unstack(a).forEach(tensor => tensor.print());
* ```
*
* @param x A tensor object.
* @param axis The axis to unstack along. Defaults to 0 (the first dim).
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
function unstack_(x, axis = 0) {
axis = axis || 0;
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "unstack");
util["assert"](
axis >= -$x.shape.length && axis < $x.shape.length,
() => `Axis = ${axis} is not in [-${$x.shape.length}, ${$x.shape.length})`
);
if (axis < 0) {
axis += $x.shape.length;
}
const grad = (dy) => {
return { x: () => stack(dy, axis) };
};
const attrs = { axis };
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.unstack($x, axis),
{ x: $x },
grad,
"Unpack",
attrs
);
}
/**
* Returns a `tf.Tensor` that has expanded rank, by inserting a dimension
* into the tensor's shape.
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* const axis = 1;
* x.expandDims(axis).print();
* ```
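 *
 * ```js
 * // A negative axis counts from the end; -1 appends the new dimension:
 * tf.tensor1d([1, 2]).expandDims(-1).print(); // Tensor of shape [2, 1]
 * ```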
*
* @param x The input tensor whose dimensions to be expanded.
* @param axis The dimension index at which to insert shape of `1`. Defaults
* to 0 (the first dimension).
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function expandDims_(x, axis = 0) {
const parseAs = null;
const $x = Object(tensor_util_env["a" /* convertToTensor */])(
x,
"x",
"expandDims",
parseAs
);
util["assert"](axis <= $x.rank, () => "Axis must be <= rank of the tensor");
const newShape = $x.shape.slice();
if (axis < 0) {
    // A negative axis counts back from the end of the rank.
util["assert"](
-($x.rank + 1) <= axis,
() => `Axis must be in the interval [${-($x.rank + 1)}, ${$x.rank}]`
);
axis = $x.rank + axis + 1;
}
newShape.splice(axis, 0, 1);
return reshape($x, newShape);
}
/**
* Computes the difference between two lists of numbers.
*
* Given a Tensor `x` and a Tensor `y`, this operation returns a Tensor `out`
* that represents all values that are in `x` but not in `y`. The returned
* Tensor `out` is sorted in the same order that the numbers appear in `x`
 * (duplicates are preserved). This operation also returns a Tensor `indices`
 * that represents the position of each `out` element in `x`. In other words:
*
* `out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]`
*
* ```js
* const x = [1, 2, 3, 4, 5, 6];
* const y = [1, 3, 5];
*
* const [out, indices] = await tf.setdiff1dAsync(x, y);
* out.print(); // [2, 4, 6]
* indices.print(); // [1, 3, 5]
* ```
*
* @param x 1-D Tensor. Values to keep.
* @param y 1-D Tensor. Must have the same type as x. Values to exclude in the
* output.
* @returns Promise of Tensor tuple [out, indices].
* out: Tensor with the same type as x.
* indices: A Tensor of type int32.
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
async function setdiff1dAsync_(x, y) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "setdiff1d");
const $y = Object(tensor_util_env["a" /* convertToTensor */])(y, "y", "setdiff1d");
util["assert"](
$x.dtype === $y.dtype,
() => `x and y should have the same dtype, but got x (${$x.dtype}) and y (${$y.dtype}).`
);
util["assert"]($x.rank === 1, () => `x should be 1D tensor, but got x (${$x.shape}).`);
util["assert"]($y.rank === 1, () => `y should be 1D tensor, but got y (${$y.shape}).`);
const xVals = await $x.data();
const yVals = await $y.data();
const ySet = new Set(yVals);
let outputSize = 0;
for (let i = 0; i < xVals.length; i++) {
if (!ySet.has(xVals[i])) {
outputSize++;
}
}
const buffer = new dist_tensor["b" /* TensorBuffer */]([outputSize], $x.dtype);
const indices = new dist_tensor["b" /* TensorBuffer */]([outputSize], "int32");
for (let i = 0, p = 0; i < xVals.length; i++) {
if (!ySet.has(xVals[i])) {
buffer.values[p] = xVals[i];
indices.values[p] = i;
p++;
}
}
return [buffer.toTensor(), indices.toTensor()];
}
/**
* Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.
*
 * The values are stored on the CPU as a `TypedArray`. Fill the buffer using
 * `buffer.set()`, or by modifying `buffer.values` directly.
*
* When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with
* those values.
*
* ```js
* // Create a buffer and set values at particular indices.
* const buffer = tf.buffer([2, 2]);
* buffer.set(3, 0, 0);
* buffer.set(5, 1, 0);
*
* // Convert the buffer back to a tensor.
* buffer.toTensor().print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
* @param dtype The dtype of the buffer. Defaults to 'float32'.
* @param values The values of the buffer as `TypedArray`. Defaults to
* zeros.
*/
/** @doc {heading: 'Tensors', subheading: 'Creation'} */
function array_ops_buffer(shape, dtype = "float32", values) {
dtype = dtype || "float32";
util["assertNonNegativeIntegerDimensions"](shape);
return new dist_tensor["b" /* TensorBuffer */](shape, dtype, values);
}
/**
* Prints information about the `tf.Tensor` including its data.
*
* ```js
* const verbose = true;
* tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);
* ```
* @param x The tensor to be printed.
 * @param verbose Whether to print verbose information about the `tf.Tensor`,
 * including dtype and size.
*/
/** @doc {heading: 'Tensors', subheading: 'Creation'} */
function print(x, verbose = false) {
console.log(x.toString(verbose));
}
const cast = Object(operation["a" /* op */])({ cast_ });
const expandDims = Object(operation["a" /* op */])({ expandDims_ });
const reshape = Object(operation["a" /* op */])({ reshape_ });
const squeeze = Object(operation["a" /* op */])({ squeeze_ });
const stack = Object(operation["a" /* op */])({ stack_ });
const unstack = Object(operation["a" /* op */])({ unstack_ });
const setdiff1dAsync = setdiff1dAsync_;
//# sourceMappingURL=array_ops.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
 * The result is rounded with the floor function.
 *
* ```js
* const a = tf.tensor1d([1, 4, 9, 16]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
 * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
* ```
*
* ```js
* // Broadcast div a with b.
* const a = tf.tensor1d([2, 4, 6, 8]);
* const b = tf.scalar(2);
*
* a.floorDiv(b).print(); // or tf.floorDiv(a, b)
* ```
*
* @param a The first tensor as the numerator.
* @param b The second tensor as the denominator. Must have the same dtype as
* `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
function floorDiv_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "floorDiv");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "floorDiv");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
const forward = (backend, save) => {
const res = backend.floorDiv($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["D" /* FloorDiv */]
);
}
const floorDiv = Object(operation["a" /* op */])({ floorDiv_ });
//# sourceMappingURL=floorDiv.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/div.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 4, 9, 16]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* ```js
* // Broadcast div a with b.
* const a = tf.tensor1d([2, 4, 6, 8]);
* const b = tf.scalar(2);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* @param a The first tensor as the numerator.
* @param b The second tensor as the denominator. Must have the same dtype as
* `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
function div_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "div");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "div");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
if ($a.dtype === "int32" && $b.dtype === "int32") {
return floorDiv($a, $b);
}
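  // Note: because int32 inputs take the floorDiv path above, integer division
  // floors; e.g. dividing int32 tensors [7] by [2] yields [3].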
const forward = (backend, save) => {
const res = backend.realDivide($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
const attrs = {};
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["y" /* Div */],
attrs
);
}
const div = Object(operation["a" /* op */])({ div_ });
//# sourceMappingURL=div.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mul.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting.
*
* We also expose `tf.mulStrict` which has the same signature as this op and
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([2, 3, 4, 5]);
*
* a.mul(b).print(); // or tf.mul(a, b)
* ```
*
* ```js
* // Broadcast mul a with b.
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.scalar(5);
*
* a.mul(b).print(); // or tf.mul(a, b)
* ```
* @param a The first tensor to multiply.
* @param b The second tensor to multiply. Must have the same dtype as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
function mul_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "mul");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "mul");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
const forward = (backend, save) => {
const res = backend.multiply($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["Y" /* Multiply */]
);
}
const mul = Object(operation["a" /* op */])({ mul_ });
//# sourceMappingURL=mul.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Provided `f(x)`, returns another function `g(x, dy?)`, which gives the
* gradient of `f(x)` with respect to `x`.
*
* If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to
* `x` is computed instead. `f(x)` must take a single tensor `x` and return a
* single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead.
*
* ```js
* // f(x) = x ^ 2
* const f = x => x.square();
* // f'(x) = 2x
* const g = tf.grad(f);
*
* const x = tf.tensor1d([2, 3]);
* g(x).print();
* ```
*
* ```js
* // f(x) = x ^ 3
* const f = x => x.pow(tf.scalar(3, 'int32'));
* // f'(x) = 3x ^ 2
* const g = tf.grad(f);
* // f''(x) = 6x
* const gg = tf.grad(g);
*
* const x = tf.tensor1d([2, 3]);
* gg(x).print();
* ```
*
* @param f The function f(x), to compute gradient for.
*/
/** @doc {heading: 'Training', subheading: 'Gradients'} */
function gradients_grad(f) {
util["assert"](util["isFunction"](f), () => "The f passed in grad(f) must be a function");
return (x, dy) => {
// x can be of any dtype, thus null as the last argument.
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tf.grad", null);
const $dy =
dy != null
? Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "tf.grad")
: null;
return engine["a" /* ENGINE */].tidy(() => {
const { value, grads } = engine["a" /* ENGINE */].gradients(() => f($x), [$x], $dy);
if ($dy != null) {
util["assertShapesMatch"](
value.shape,
$dy.shape,
"The shape of dy passed in grad(f)(x, dy) must match the shape " +
"returned by f(x)"
);
}
checkGrads(grads);
return grads[0];
});
};
}
/**
* Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`,
* which gives an array of gradients of `f()` with respect to each input
* [`x1`,`x2`,...].
*
* If `dy` is passed when calling `g()`, the gradient of
* `f(x1,...).mul(dy).sum()` with respect to each input is computed instead.
* The provided `f` must take one or more tensors and return a single tensor
* `y`. If `f()` takes a single input, we recommend using `tf.grad` instead.
*
* ```js
* // f(a, b) = a * b
* const f = (a, b) => a.mul(b);
* // df / da = b, df / db = a
* const g = tf.grads(f);
*
* const a = tf.tensor1d([2, 3]);
* const b = tf.tensor1d([-2, -3]);
* const [da, db] = g([a, b]);
* console.log('da');
* da.print();
* console.log('db');
* db.print();
* ```
*
* @param f The function `f(x1, x2,...)` to compute gradients for.
*/
/** @doc {heading: 'Training', subheading: 'Gradients'} */
function gradients_grads(f) {
util["assert"](util["isFunction"](f), () => "The f passed in grads(f) must be a function");
return (args, dy) => {
util["assert"](
Array.isArray(args),
() =>
"The args passed in grads(f)(args) must be an array " +
"of `Tensor`s or `TensorLike`s"
);
// args can be of any dtype, thus null as the last argument.
const $args = Object(tensor_util_env["b" /* convertToTensorArray */])(
args,
"args",
"tf.grads",
null
);
const $dy =
dy != null
? Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "tf.grads")
: null;
return engine["a" /* ENGINE */].tidy(() => {
const { value, grads } = engine["a" /* ENGINE */].gradients(
() => f(...$args),
$args,
$dy
);
if ($dy != null) {
util["assertShapesMatch"](
value.shape,
$dy.shape,
"The shape of dy passed in grads(f)([x1,...], dy) must " +
"match the shape returned by f([x1,...])"
);
}
checkGrads(grads);
return grads;
});
};
}
/**
* Like `tf.grad`, but also returns the value of `f()`. Useful when `f()`
* returns a metric you want to show.
*
* The result is a rich object with the following properties:
* - grad: The gradient of `f(x)` w.r.t `x` (result of `tf.grad`).
* - value: The value returned by `f(x)`.
*
* ```js
* // f(x) = x ^ 2
* const f = x => x.square();
* // f'(x) = 2x
* const g = tf.valueAndGrad(f);
*
* const x = tf.tensor1d([2, 3]);
* const {value, grad} = g(x);
*
* console.log('value');
* value.print();
* console.log('grad');
* grad.print();
* ```
*/
/** @doc {heading: 'Training', subheading: 'Gradients'} */
function valueAndGrad(f) {
util["assert"](
util["isFunction"](f),
() => "The f passed in valueAndGrad(f) must be a function"
);
return (x, dy) => {
util["assert"](
x instanceof dist_tensor["a" /* Tensor */],
() => "The x passed in valueAndGrad(f)(x) must be a tensor"
);
util["assert"](
dy == null || dy instanceof dist_tensor["a" /* Tensor */],
() => "The dy passed in valueAndGrad(f)(x, dy) must be a tensor"
);
const { grads, value } = engine["a" /* ENGINE */].gradients(() => f(x), [x], dy);
checkGrads(grads);
return { grad: grads[0], value };
};
}
/**
 * Like `tf.grads`, but also returns the value of `f()`. Useful when `f()`
* returns a metric you want to show.
*
* The result is a rich object with the following properties:
* - grads: The gradients of `f()` w.r.t each input (result of `tf.grads`).
* - value: The value returned by `f(x)`.
*
* ```js
* // f(a, b) = a * b
* const f = (a, b) => a.mul(b);
* // df/da = b, df/db = a
* const g = tf.valueAndGrads(f);
*
* const a = tf.tensor1d([2, 3]);
* const b = tf.tensor1d([-2, -3]);
* const {value, grads} = g([a, b]);
*
* const [da, db] = grads;
*
* console.log('value');
* value.print();
*
* console.log('da');
* da.print();
* console.log('db');
* db.print();
* ```
*/
/** @doc {heading: 'Training', subheading: 'Gradients'} */
function valueAndGrads(f) {
util["assert"](
util["isFunction"](f),
() => "The f passed in valueAndGrads(f) must be a function"
);
return (args, dy) => {
util["assert"](
Array.isArray(args) &&
args.every((arg) => arg instanceof dist_tensor["a" /* Tensor */]),
() => "The args passed in valueAndGrads(f)(args) must be array of " + "tensors"
);
util["assert"](
dy == null || dy instanceof dist_tensor["a" /* Tensor */],
() => "The dy passed in valueAndGrads(f)(args, dy) must be a tensor"
);
const res = engine["a" /* ENGINE */].gradients(() => f(...args), args, dy);
if (dy != null) {
util["assertShapesMatch"](
res.value.shape,
dy.shape,
"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must " +
"match the shape returned by f([x1,...])"
);
}
checkGrads(res.grads);
return res;
};
}
/**
* Computes and returns the gradient of f(x) with respect to the list of
* trainable variables provided by `varList`. If no list is provided, it
* defaults to all trainable variables.
*
* ```js
* const a = tf.variable(tf.tensor1d([3, 4]));
* const b = tf.variable(tf.tensor1d([5, 6]));
* const x = tf.tensor1d([1, 2]);
*
* // f(a, b) = a * x ^ 2 + b * x
* const f = () => a.mul(x.square()).add(b.mul(x)).sum();
* // df/da = x ^ 2, df/db = x
* const {value, grads} = tf.variableGrads(f);
*
* Object.keys(grads).forEach(varName => grads[varName].print());
* ```
*
* @param f The function to execute. f() should return a scalar.
* @param varList The list of variables to compute the gradients with respect
* to. Defaults to all trainable variables.
* @returns An object with the following keys and values:
* - `value`: The value of the function `f`.
* - `grads`: A map from the names of the variables to the gradients.
* If the `varList` argument is provided explicitly and contains a subset of
* non-trainable variables, this map in the return value will contain keys
* that map the names of the non-trainable variables to `null`.
*/
/** @doc {heading: 'Training', subheading: 'Gradients'} */
function variableGrads(f, varList) {
util["assert"](
util["isFunction"](f),
() => "The f passed in variableGrads(f) must be a function"
);
util["assert"](
varList == null ||
(Array.isArray(varList) &&
varList.every((v) => v instanceof dist_tensor["c" /* Variable */])),
() => "The varList passed in variableGrads(f, varList) must be an array " + "of variables"
);
const specifiedVarList = varList != null;
if (!specifiedVarList) {
// Get all of the trainable variables.
varList = [];
for (const varName in engine["a" /* ENGINE */].registeredVariables) {
varList.push(engine["a" /* ENGINE */].registeredVariables[varName]);
}
}
const specifiedNonTrainable = specifiedVarList
? varList.filter((variable) => !variable.trainable)
: null;
// Prune non-trainable variables.
const originalVarCount = varList.length;
varList = varList.filter((variable) => variable.trainable);
util["assert"](
varList.length > 0,
() =>
`variableGrads() expects at least one of the input variables to ` +
`be trainable, but none of the ${originalVarCount} variables is ` +
`trainable.`
);
const allowNoGradients = true;
const { value, grads } = engine["a" /* ENGINE */].gradients(
f,
varList,
null,
allowNoGradients
);
util["assert"](
grads.some((g) => g != null),
() =>
"Cannot find a connection between any variable and the result of " +
"the loss function y=f(x). Please make sure the operations that " +
"use variables are inside the function f passed to minimize()."
);
util["assert"](
value.rank === 0,
() =>
`The f passed in variableGrads(f) must return a scalar, but it ` +
`returned a rank-${value.rank} tensor`
);
const namedGrads = {};
varList.forEach((v, i) => {
if (grads[i] != null) {
namedGrads[v.name] = grads[i];
}
});
if (specifiedNonTrainable != null) {
// If varList is explicitly provided and contains non-trainable values,
// add them to the returned gradients with `null` values.
specifiedNonTrainable.forEach((v) => (namedGrads[v.name] = null));
}
return { value, grads: namedGrads };
}
/**
* Overrides the gradient computation of a function `f`.
*
* Takes a function
* `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}`
* and returns another function `g(...inputs)` which takes the same inputs as
* `f`. When called, `g` returns `f().value`. In backward mode, custom gradients
* with respect to each input of `f` are computed using `f().gradFunc`.
*
 * The `save` function passed to `f` should be used for saving tensors needed
 * by the gradient. The `saved` argument passed to `gradFunc` is a
 * `NamedTensorMap`, which contains those saved tensors.
*
* ```js
* const customOp = tf.customGrad((x, save) => {
* // Save x to make sure it's available later for the gradient.
* save([x]);
* // Override gradient of our custom x ^ 2 op to be dy * abs(x);
* return {
* value: x.square(),
* // Note `saved.x` which points to the `x` we saved earlier.
* gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]
* };
* });
*
* const x = tf.tensor1d([-1, -2, 3]);
* const dx = tf.grad(x => customOp(x));
*
* console.log(`f(x):`);
* customOp(x).print();
* console.log(`f'(x):`);
* dx(x).print();
* ```
*
* @param f The function to evaluate in forward mode, which should return
* `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc`
* returns the custom gradients of `f` with respect to its inputs.
*/
/** @doc {heading: 'Training', subheading: 'Gradients'} */
function customGrad(f) {
return engine["a" /* ENGINE */].customGrad(f);
}
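/**
 * Throws if any of the computed gradients is null, i.e. if some requested
 * input turned out to be disconnected from the output of `f`.
 */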
function checkGrads(grads) {
const numNullGradients = grads.filter((g) => g == null).length;
if (numNullGradients > 0) {
throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that
the f you passed encloses all operations that lead from x to y.`);
}
}
//# sourceMappingURL=gradients.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js
/**
* @license
* Copyright 2017 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Returns true if the axes specify the inner-most dimensions of the
 * array.
*/
function axesAreInnerMostDims(axes, rank) {
for (let i = 0; i < axes.length; ++i) {
if (axes[axes.length - i - 1] !== rank - 1 - i) {
return false;
}
}
return true;
}
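/**
 * Interleaves an output location and a reduced location into a full-rank
 * location, placing the reduced coordinates at the positions named in `axes`.
 * Illustrative: combineLocations([5], [7], [0]) returns [7, 5].
 */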
function combineLocations(outputLoc, reduceLoc, axes) {
const rank = outputLoc.length + reduceLoc.length;
const loc = [];
let outIdx = 0;
let reduceIdx = 0;
for (let dim = 0; dim < rank; dim++) {
if (axes.indexOf(dim) === -1) {
loc.push(outputLoc[outIdx++]);
} else {
loc.push(reduceLoc[reduceIdx++]);
}
}
return loc;
}
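/**
 * Splits `aShape` into the shape left after reducing over `axes` and the
 * shape of the reduced dimensions themselves.
 * Illustrative: computeOutAndReduceShapes([2, 3, 4], [1]) returns [[2, 4], [3]].
 */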
function computeOutAndReduceShapes(aShape, axes) {
const outShape = [];
const rank = aShape.length;
for (let dim = 0; dim < rank; dim++) {
if (axes.indexOf(dim) === -1) {
outShape.push(aShape[dim]);
}
}
const reduceShape = axes.map((dim) => aShape[dim]);
return [outShape, reduceShape];
}
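/**
 * Re-inserts length-1 entries into `shape` at the reduced `axes`, i.e. the
 * shape a keepDims reduction would produce.
 * Illustrative: expandShapeToKeepDim([2, 4], [1]) returns [2, 1, 4].
 */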
function expandShapeToKeepDim(shape, axes) {
const reduceSubShape = axes.map((x) => 1);
return combineLocations(shape, reduceSubShape, axes);
}
function assertAxesAreInnerMostDims(msg, axes, rank) {
util["assert"](
axesAreInnerMostDims(axes, rank),
() =>
`${msg} supports only inner-most axes for now. ` +
`Got axes ${axes} and rank-${rank} input.`
);
}
/**
 * Returns the axes permutation to be used with `tf.transpose`, if such a
 * permutation is necessary. Otherwise it returns null. This method is used by
* operations that operate only on inner-most axes.
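 * Illustrative: getAxesPermutation([0], 2) returns [1, 0], while
 * getAxesPermutation([1], 2) returns null (axis 1 is already inner-most).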
*/
function getAxesPermutation(axes, rank) {
if (axesAreInnerMostDims(axes, rank)) {
return null;
}
const result = [];
for (let i = 0; i < rank; ++i) {
if (axes.indexOf(i) === -1) {
result.push(i);
}
}
axes.forEach((axis) => result.push(axis));
return result;
}
/** Returns the axes permutation that undoes the original permutation. */
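// Illustrative: getUndoAxesPermutation([2, 0, 1]) returns [1, 2, 0].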
function getUndoAxesPermutation(axes) {
return axes
.map((axis, i) => [i, axis])
.sort((a, b) => a[1] - b[1])
.map((x) => x[0]);
}
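/**
 * Returns the `numAxes` inner-most axes of a rank-`rank` tensor.
 * Illustrative: getInnerMostAxes(2, 4) returns [2, 3].
 */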
function getInnerMostAxes(numAxes, rank) {
const res = [];
for (let i = rank - numAxes; i < rank; ++i) {
res.push(i);
}
return res;
}
//# sourceMappingURL=axis_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reduction_ops_util.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Gradient helper function for the min and max operations.
*/
function gradForMinAndMax(dy, y, xOrig, origAxes, permutedAxes) {
if (y.rank < xOrig.rank) {
y = y.reshape(expandShapeToKeepDim(y.shape, origAxes));
}
if (dy.rank < xOrig.rank) {
dy = dy.reshape(expandShapeToKeepDim(dy.shape, origAxes));
}
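// Route dy only to the positions where the input attained the reduced
// extremum; all other positions receive a zero gradient.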
return {
x: () => {
const dx = dy.mul(xOrig.equal(y).cast(dy.dtype));
return permutedAxes == null ? dx : dx.transpose(permutedAxes);
},
};
}
//# sourceMappingURL=reduction_ops_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reduction_ops.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Computes the log(sum(exp(elements across the reduction dimensions))).
*
* Reduces the input along the dimensions given in `axis`. Unless `keepDims`
* is true, the rank of the array is reduced by 1 for each entry in `axis`.
* If `keepDims` is true, the reduced dimensions are retained with length 1.
* If `axis` has no entries, all dimensions are reduced, and an array with a
* single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.logSumExp().print(); // or tf.logSumExp(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
 * x.logSumExp(axis).print(); // or tf.logSumExp(x, axis)
* ```
* @param x The input tensor.
* @param axis The dimension(s) to reduce. If null (the default),
* reduces all dimensions.
* @param keepDims If true, retains reduced dimensions with length
* of 1. Defaults to false.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function logSumExp_(x, axis = null, keepDims = false) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "logSumExp");
const axes = util["parseAxisParam"](axis, $x.shape);
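// Shift by the max for numerical stability:
// logsumexp(x) = max(x) + log(sum(exp(x - max(x)))).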
const xMax = $x.max(axes, true /* keepDims */);
const a = $x.sub(xMax);
const b = a.exp();
const c = b.sum(axes);
const d = c.log();
const res = xMax.reshape(d.shape).add(d);
if (keepDims) {
const newShape = expandShapeToKeepDim(res.shape, axes);
return res.reshape(newShape);
}
return res;
}
/**
* Computes the sum of elements across dimensions of a `tf.Tensor`.
*
* Reduces the input along the dimensions given in `axes`. Unless `keepDims`
* is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
* `axes`. If `keepDims` is true, the reduced dimensions are retained with
* length 1. If axes has no entries, all dimensions are reduced, and a
* `tf.Tensor` with a single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.sum().print(); // or tf.sum(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
* x.sum(axis).print(); // or tf.sum(x, axis)
* ```
*
* @param x The input tensor to compute the sum over. If the dtype is `bool`
* it will be converted to `int32` and the output dtype will be `int32`.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function sum_(x, axis = null, keepDims = false) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sum");
if ($x.dtype === "bool") {
$x = $x.toInt();
}
const axes = util["parseAxisParam"](axis, $x.shape);
// Use a custom gradient to bypass 2 gradient backprops since sum is used
// extremely often.
const customOp = customGrad((x) => {
const permutation = getAxesPermutation(axes, x.rank);
let reductionAxes = axes;
let permutedX = x;
if (permutation != null) {
permutedX = x.transpose(permutation);
reductionAxes = getInnerMostAxes(reductionAxes.length, x.rank);
}
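// d(sum)/dx is 1 everywhere, so the gradient simply broadcasts dy back
// across the reduced axes.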
const gradFunc = (dy) => {
const expandedDyShape = x.shape.slice();
axes.forEach((axis) => {
expandedDyShape[axis] = 1;
});
const expandedDy = dy.reshape(expandedDyShape);
const derX = expandedDy.mul(Object(tensor_ops["b" /* ones */])(x.shape, "float32"));
return derX;
};
const gradInputs = (dy) => {
return { x: () => gradFunc(dy) };
};
const attrs = { axes: reductionAxes };
let value = engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.sum(permutedX, reductionAxes),
{ x: permutedX },
gradInputs,
"Sum",
attrs
);
if (keepDims) {
const newShape = expandShapeToKeepDim(value.shape, axes);
value = value.reshape(newShape);
}
return { value, gradFunc };
});
return customOp($x);
}
/**
* Computes the product of elements across dimensions of a `tf.Tensor`.
*
* Reduces the input along the dimensions given in `axes`. Unless `keepDims`
* is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
* `axes`. If `keepDims` is true, the reduced dimensions are retained with
* length 1. If `axes` has no entries, all dimensions are reduced, and a
* `tf.Tensor` with a single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.prod().print(); // or tf.prod(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
* x.prod(axis).print(); // or tf.prod(x, axis)
* ```
*
* @param x The input tensor to compute the product over. If the dtype is `bool`
* it will be converted to `int32` and the output dtype will be `int32`.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function prod_(x, axis = null, keepDims = false) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "prod");
if ($x.dtype === "bool") {
$x = $x.toInt();
}
const axes = util["parseAxisParam"](axis, $x.shape);
const permutation = getAxesPermutation(axes, $x.rank);
let reductionAxes = axes;
let permutedX = $x;
if (permutation != null) {
permutedX = $x.transpose(permutation);
reductionAxes = getInnerMostAxes(reductionAxes.length, $x.rank);
}
let value = engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.prod(permutedX, reductionAxes),
{ permutedX }
);
if (keepDims) {
const newShape = expandShapeToKeepDim(value.shape, axes);
value = value.reshape(newShape);
}
return value;
}
/**
* Computes the mean of elements across dimensions of a `tf.Tensor`.
*
* Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is
* true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`.
* If `keepDims` is true, the reduced dimensions are retained with length 1.
* If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with
* a single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
 * x.mean().print(); // or tf.mean(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
* x.mean(axis).print(); // or tf.mean(x, axis)
* ```
*
* @param x The input tensor.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function mean_(x, axis = null, keepDims = false) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "mean");
const axes = util["parseAxisParam"](axis, $x.shape);
const shapes = computeOutAndReduceShapes($x.shape, axes);
const reduceShape = shapes[1];
const reduceSize = util["sizeFromShape"](reduceShape);
// Use a custom gradient to bypass 2 gradient backprops since mean is used
// extremely often.
const customOp = customGrad((x) => {
const reduceSizeScalar = Object(tensor_ops["e" /* scalar */])(reduceSize);
// Cast if needed.
const xReduce = reduceSizeScalar.dtype === x.dtype ? x : x.cast(reduceSizeScalar.dtype);
const res = xReduce.div(reduceSizeScalar);
const value = res.sum(axis, keepDims);
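// d(mean)/dx is 1/reduceSize everywhere, so broadcast dy across the
// reduced axes and divide by the number of reduced elements.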
const gradFunc = (dy) => {
const expandedDyShape = x.shape.slice();
axes.forEach((axis) => {
expandedDyShape[axis] = 1;
});
const expandedDy = dy.reshape(expandedDyShape);
const derX = expandedDy
.mul(Object(tensor_ops["b" /* ones */])(x.shape, "float32"))
.div(reduceSize);
return derX;
};
return { value, gradFunc };
});
return customOp($x);
}
/**
* Computes the minimum value from the input.
*
* Reduces the input along the dimensions given in `axes`. Unless `keepDims`
* is true, the rank of the array is reduced by 1 for each entry in `axes`.
* If `keepDims` is true, the reduced dimensions are retained with length 1.
* If `axes` has no entries, all dimensions are reduced, and an array with a
* single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.min().print(); // or tf.min(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
* x.min(axis).print(); // or tf.min(x, axis)
* ```
*
* @param x The input Tensor.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function min_(x, axis = null, keepDims = false) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "min");
const xOrig = $x;
const origAxes = util["parseAxisParam"](axis, $x.shape);
let axes = origAxes;
const permutedAxes = getAxesPermutation(axes, $x.rank);
if (permutedAxes != null) {
$x = $x.transpose(permutedAxes);
axes = getInnerMostAxes(axes.length, $x.rank);
}
const grad = (dy, saved) =>
gradForMinAndMax(dy, saved[1], saved[0], origAxes, permutedAxes);
const inputsToSave = [$x];
const outputsToSave = [true];
let res = engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const y = backend.min($x, axes);
save([xOrig, y]);
return y;
},
{ x: $x },
grad,
"Min",
{ axes },
inputsToSave,
outputsToSave
);
if (keepDims) {
const newShape = expandShapeToKeepDim(res.shape, origAxes);
res = res.reshape(newShape);
}
return res;
}
/**
* Returns the indices of the minimum values along an `axis`.
*
* The result has the same shape as `input` with the dimension along `axis`
* removed.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.argMin().print(); // or tf.argMin(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
*
* const axis = 1;
* x.argMin(axis).print(); // or tf.argMin(x, axis)
* ```
*
* @param x The input tensor.
* @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
*
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function argMin_(x, axis = 0) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "argMin");
if (axis == null) {
axis = 0;
}
let axes = util["parseAxisParam"](axis, $x.shape);
const permutedAxes = getAxesPermutation(axes, $x.rank);
if (permutedAxes != null) {
$x = $x.transpose(permutedAxes);
axes = getInnerMostAxes(axes.length, $x.rank);
}
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => Object(tensor_ops["o" /* zerosLike */])($x) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.argMin($x, axes[0]);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Returns the indices of the maximum values along an `axis`.
*
* The result has the same shape as `input` with the dimension along `axis`
* removed.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.argMax().print(); // or tf.argMax(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
*
* const axis = 1;
* x.argMax(axis).print(); // or tf.argMax(x, axis)
* ```
*
* @param x The input tensor.
* @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function argMax_(x, axis = 0) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "argMax");
if (axis == null) {
axis = 0;
}
let axes = util["parseAxisParam"](axis, $x.shape);
const permutedAxes = getAxesPermutation(axes, $x.rank);
if (permutedAxes != null) {
$x = $x.transpose(permutedAxes);
axes = getInnerMostAxes(axes.length, $x.rank);
}
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => Object(tensor_ops["o" /* zerosLike */])($x) };
};
const attrs = { axis: axes[0] };
const inputsToSave = [$x];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.argMax($x, axes[0]);
save([$x]);
return res;
},
{ x: $x },
grad,
"ArgMax",
attrs,
inputsToSave
);
}
/**
* Computes the logical and of elements across dimensions of a `tf.Tensor`.
*
* Reduces the input along the dimensions given in `axes`. Unless `keepDims`
* is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
* `axes`. If `keepDims` is true, the reduced dimensions are retained with
* length 1. If `axes` has no entries, all dimensions are reduced, and an
* `tf.Tensor` with a single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 1, 1], 'bool');
*
* x.all().print(); // or tf.all(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
*
* const axis = 1;
* x.all(axis).print(); // or tf.all(x, axis)
* ```
*
* @param x The input tensor. Must be of dtype bool.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function all_(x, axis = null, keepDims = false) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "all", "bool");
const origAxes = util["parseAxisParam"](axis, $x.shape);
let axes = origAxes;
const permutedAxes = getAxesPermutation(axes, $x.rank);
if (permutedAxes != null) {
$x = $x.transpose(permutedAxes);
axes = getInnerMostAxes(axes.length, $x.rank);
}
const res = engine["a" /* ENGINE */].runKernelFunc((backend) => backend.all($x, axes), {
$x,
});
if (keepDims) {
const newShape = expandShapeToKeepDim(res.shape, origAxes);
return res.reshape(newShape);
}
return res;
}
/**
* Computes the logical or of elements across dimensions of a `tf.Tensor`.
*
* Reduces the input along the dimensions given in `axes`. Unless `keepDims`
* is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
* `axes`. If `keepDims` is true, the reduced dimensions are retained with
* length 1. If `axes` has no entries, all dimensions are reduced, and an
* `tf.Tensor` with a single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 1, 1], 'bool');
*
* x.any().print(); // or tf.any(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
*
* const axis = 1;
* x.any(axis).print(); // or tf.any(x, axis)
* ```
*
* @param x The input tensor. Must be of dtype bool.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*/
/** @doc {heading: 'Operations', subheading: 'Reduction'} */
function any_(x, axis = null, keepDims = false) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "any", "bool");
const origAxes = util["parseAxisParam"](axis, $x.shape);
let axes = origAxes;
const permutedAxes = getAxesPermutation(axes, $x.rank);
if (permutedAxes != null) {
$x = $x.transpose(permutedAxes);
axes = getInnerMostAxes(axes.length, $x.rank);
}
const res = engine["a" /* ENGINE */].runKernelFunc((backend) => backend.any($x, axes), {
$x,
});
if (keepDims) {
const newShape = expandShapeToKeepDim(res.shape, origAxes);
return res.reshape(newShape);
}
return res;
}
/**
* Calculates the mean and variance of `x`. The mean and variance are
* calculated by aggregating the contents of `x` across `axes`. If `x` is
* 1-D and `axes = [0]` this is just the mean and variance of a vector.
*
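 * A small illustrative example (values worked out by hand):
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3, 4]);
 * const {mean, variance} = tf.moments(x);
 * mean.print();     // 2.5
 * variance.print(); // 1.25
 * ```
 *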
* @param x The input tensor.
 * @param axis The dimension(s) along which to compute mean and
 * variance. By default it reduces all dimensions.
* @param keepDims If true, the moments have the same dimensionality as the
* input.
* @return An object with two keys: `mean` and `variance`.
*/
/** @doc {heading: 'Operations', subheading: 'Normalization'} */
function moments_(x, axis = null, keepDims = false) {
x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "moments");
const axes = util["parseAxisParam"](axis, x.shape);
const mean = x.mean(axes, keepDims);
let keepDimsShape = mean.shape;
if (!keepDims) {
keepDimsShape = expandShapeToKeepDim(mean.shape, axes);
}
const devSquared = x.toFloat().sub(mean.reshape(keepDimsShape)).square();
const variance = devSquared.mean(axes, keepDims);
return { mean, variance };
}
const reduction_ops_all = Object(operation["a" /* op */])({ all_ });
// tslint:disable-next-line:variable-name
const any = Object(operation["a" /* op */])({ any_ });
const argMax = Object(operation["a" /* op */])({ argMax_ });
const argMin = Object(operation["a" /* op */])({ argMin_ });
const logSumExp = Object(operation["a" /* op */])({ logSumExp_ });
const reduction_ops_mean = Object(operation["a" /* op */])({ mean_ });
const reduction_ops_min = Object(operation["a" /* op */])({ min_ });
const moments = Object(operation["a" /* op */])({ moments_ });
const sum = Object(operation["a" /* op */])({ sum_ });
const reduction_ops_prod = Object(operation["a" /* op */])({ prod_ });
//# sourceMappingURL=reduction_ops.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/square.js
/**
* @license
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes square of `x` element-wise: `x ^ 2`
*
* ```js
* const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);
*
* x.square().print(); // or tf.square(x)
* ```
* @param x The input Tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function square_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "square");
const attrs = {};
const inputsToSave = [$x];
const outputsToSave = [];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
save([$x]);
return backend.square($x);
},
{ x: $x },
null /* grad */,
"Square",
attrs,
inputsToSave,
outputsToSave
);
}
const square = Object(operation["a" /* op */])({ square_ });
//# sourceMappingURL=square.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/unary_ops.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes `-1 * x` element-wise.
*
* ```js
* const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);
*
* x.neg().print(); // or tf.neg(x)
* ```
*
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function neg_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "neg");
const grad = (dy) => {
return { x: () => dy.neg() };
};
const attrs = {};
const inputsToSave = [$x];
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.neg($x),
{ x: $x },
grad,
"Neg",
attrs,
inputsToSave
);
}
/**
* Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)`
*
* ```js
* const x = tf.tensor1d([.6, 1.1, -3.3]);
*
* x.ceil().print(); // or tf.ceil(x)
* ```
* @param x The input Tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function ceil_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "ceil");
// TODO(manrajgrover): Return null for gradients when backprop supports it.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.ceil($x), { $x }, grad);
}
/**
* Computes floor of input `tf.Tensor` element-wise: `floor(x)`.
*
* ```js
* const x = tf.tensor1d([.6, 1.1, -3.3]);
*
* x.floor().print(); // or tf.floor(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function floor_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "floor");
// TODO(nsthorat): Let gradients be null for cases where we want to stop
// backpropagation.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.floor($x), { $x }, grad);
}
/**
* Returns an element-wise indication of the sign of a number.
*
* ```js
* const x = tf.tensor1d([.6, 1.1, -3.3, NaN, 0]);
*
* x.sign().print(); // or tf.sign(x)
* ```
* @param x The input Tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function sign_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sign");
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.sign($x), { $x }, grad);
}
/**
 * Returns which elements of x are NaN.
*
* ```js
* const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
*
* x.isNaN().print(); // or tf.isNaN(x)
* ```
* @param x The input Tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function isNaN_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "isNaN");
// TODO(nsthorat): Let gradients be null for cases where we want to stop
// backpropagation.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.isNaN($x), { $x }, grad);
}
/**
* Returns which elements of x are Infinity or -Infinity.
*
* ```js
* const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
*
 * x.isInf().print(); // or tf.isInf(x)
* ```
* @param x The input Tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function isInf_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "isInf");
// TODO(nsthorat): Let gradients be null for cases where we want to stop
// backpropagation.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.isInf($x), { $x }, grad);
}
/**
* Returns which elements of x are finite.
*
* ```js
* const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
*
 * x.isFinite().print(); // or tf.isFinite(x)
* ```
* @param x The input Tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function isFinite_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "isFinite");
// TODO(nsthorat): Let gradients be null for cases where we want to stop
// backpropagation.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.isFinite($x),
{ $x },
grad
);
}
/**
* Computes round of input `tf.Tensor` element-wise: `round(x)`.
* It implements banker's rounding.
*
* ```js
* const x = tf.tensor1d([.6, 1.1, -3.3]);
*
* x.round().print(); // or tf.round(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function round_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "round");
// TODO(nsthorat): Let gradients be null for cases where we want to stop
// backpropagation.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.round($x), { $x }, grad);
}
/**
* Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
*
* ```js
* const x = tf.tensor1d([1, 2, -3]);
*
* x.exp().print(); // or tf.exp(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function exp_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "exp");
const bck = (dy, saved) => {
// tslint:disable-next-line: no-unnecessary-type-assertion
return { x: () => dy.mul(saved[0]) };
};
const attrs = {};
const inputsToSave = [];
const outputsToSave = [true];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const y = backend.exp($x);
save([y]);
return y;
},
{ x: $x },
bck,
"Exp",
attrs,
inputsToSave,
outputsToSave
);
}
/**
* Computes exponential of the input `tf.Tensor` minus one element-wise.
* `e ^ x - 1`
*
* ```js
* const x = tf.tensor1d([1, 2, -3]);
*
* x.expm1().print(); // or tf.expm1(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function expm1_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "expm1");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.mul($x.exp()) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.expm1($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes natural logarithm of the input `tf.Tensor` element-wise: `ln(x)`
*
* ```js
* const x = tf.tensor1d([1, 2, Math.E]);
*
* x.log().print(); // or tf.log(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function log_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "log");
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => dy.div($x.toFloat()) };
};
const attrs = {};
const inputsToSave = [$x];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.log($x);
save([$x]);
return res;
},
{ x: $x },
grad,
"Log",
attrs,
inputsToSave
);
}
/**
* Computes natural logarithm of the input `tf.Tensor` plus one
* element-wise: `ln(1 + x)`
*
* ```js
* const x = tf.tensor1d([1, 2, Math.E - 1]);
*
* x.log1p().print(); // or tf.log1p(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function log1p_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "log1p");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.div($x.add(1)) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.log1p($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)`
*
* ```js
* const x = tf.tensor1d([1, 2, 4, -1]);
*
* x.sqrt().print(); // or tf.sqrt(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function sqrt_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sqrt");
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => dy.div($x.toFloat().sqrt().mul(2)) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.sqrt($x);
save([$x]);
return res;
},
{ x: $x },
grad,
"Sqrt",
{}
);
}
/**
* Computes reciprocal of square root of the input `tf.Tensor` element-wise:
* `y = 1 / sqrt(x)`
*
* ```js
* const x = tf.tensor1d([1, 2, 4, -1]);
*
* x.rsqrt().print(); // or tf.rsqrt(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function rsqrt_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "rsqrt");
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => dy.div($x.pow(1.5).mul(2)).neg() };
};
const inputsToSave = [$x];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.rsqrt($x);
save([$x]);
return res;
},
{ x: $x },
grad,
"Rsqrt",
{} /* attrs */,
inputsToSave
);
}
/**
* Computes reciprocal of x element-wise: `1 / x`
*
* ```js
* const x = tf.tensor1d([0, 1, 2]);
*
* x.reciprocal().print(); // or tf.reciprocal(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function reciprocal_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "reciprocal");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.div($x.square().neg()) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.reciprocal($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes absolute value element-wise: `abs(x)`
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.abs().print(); // or tf.abs(x)
* ```
* @param x The input `tf.Tensor`.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function abs_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "abs");
if ($x.dtype === "complex64") {
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.complexAbs($x), {
$x,
});
}
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => dy.mul($x.toFloat().step(-1)) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.abs($x);
save([$x]);
return res;
},
{ x: $x },
grad,
"Abs"
);
}
/**
* Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
* ```
* @param x The input tensor.
* @param clipValueMin Lower-bound of range to be clipped to.
* @param clipValueMax Upper-bound of range to be clipped to.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function clipByValue_(x, clipValueMin, clipValueMax) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "clipByValue");
util["assert"](
clipValueMin <= clipValueMax,
() =>
`Error in clip: min (${clipValueMin}) must be ` +
`less than or equal to max (${clipValueMax}).`
);
const grad = (dy, saved) => {
const [$x] = saved;
return {
x: () =>
dy.where(
$x.greaterEqual(clipValueMin).logicalAnd($x.lessEqual(clipValueMax)),
Object(tensor_ops["o" /* zerosLike */])(dy)
),
};
};
const inputsToSave = [$x];
const attr = { min: clipValueMin, max: clipValueMax };
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.clip($x, clipValueMin, clipValueMax);
save([$x]);
return res;
},
{ x: $x },
grad,
"ClipByValue",
attr,
inputsToSave
);
}
/**
* Computes sigmoid element-wise, `1 / (1 + exp(-x))`
*
* ```js
* const x = tf.tensor1d([0, -1, 2, -3]);
*
* x.sigmoid().print(); // or tf.sigmoid(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function sigmoid_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sigmoid");
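// d(sigmoid)/dx = sigmoid(x) * (1 - sigmoid(x)), computed here from the
// saved forward output y.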
const grad = (dy, saved) => {
const [y] = saved;
return { x: () => dy.mul(y.mul(Object(tensor_ops["e" /* scalar */])(1).sub(y))) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const y = backend.sigmoid($x);
save([y]);
return y;
},
{ x: $x },
grad,
"Sigmoid"
);
}
/**
* Computes log sigmoid of the input `tf.Tensor` element-wise:
* `logSigmoid(x)`. For numerical stability, we use `-tf.softplus(-x)`.
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.logSigmoid().print(); // or tf.logSigmoid(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function logSigmoid_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "logSigmoid");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.mul($x.neg().sigmoid()) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.softplus($x.neg()).neg();
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes softplus of the input `tf.Tensor` element-wise: `log(exp(x) + 1)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.softplus().print(); // or tf.softplus(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function softplus_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "softplus");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.mul($x.sigmoid()) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.softplus($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes sin of the input Tensor element-wise: `sin(x)`
*
* ```js
* const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
*
* x.sin().print(); // or tf.sin(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function sin_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sin");
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => $x.toFloat().cos().mul(dy) };
};
const inputsToSave = [$x];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.sin($x);
save([$x]);
return res;
},
{ x: $x },
grad,
"Sin",
{} /* attrs */,
inputsToSave
);
}
/**
* Computes cos of the input `tf.Tensor` element-wise: `cos(x)`
*
* ```js
* const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
*
* x.cos().print(); // or tf.cos(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function cos_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cos");
const grad = (dy, saved) => {
const [$x] = saved;
return { x: () => $x.toFloat().sin().neg().mul(dy) };
};
const inputsToSave = [$x];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.cos($x);
save([$x]);
return res;
},
{ x: $x },
grad,
"Cos",
{} /* attrs */,
inputsToSave
);
}
/**
* Computes tan of the input `tf.Tensor` element-wise, `tan(x)`
*
* ```js
* const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
*
* x.tan().print(); // or tf.tan(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function tan_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tan");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.div($x.cos().square()) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.tan($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes asin of the input `tf.Tensor` element-wise: `asin(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.asin().print(); // or tf.asin(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function asin_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "asin");
const grad = (dy, saved) => {
const [$x] = saved;
return {
// tslint:disable-next-line: no-unnecessary-type-assertion
$x: () =>
dy.div(Object(tensor_ops["e" /* scalar */])(1).sub($x.toFloat().square()).sqrt()),
};
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.asin($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes acos of the input `tf.Tensor` element-wise: `acos(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.acos().print(); // or tf.acos(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function acos_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "acos");
const grad = (dy, saved) => {
const [$x] = saved;
return {
$x: () => {
const a = $x.toFloat().square();
const b = Object(tensor_ops["e" /* scalar */])(1).sub(a).sqrt();
// tslint:disable-next-line: no-unnecessary-type-assertion
return dy.div(b).neg();
},
};
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.acos($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes atan of the input `tf.Tensor` element-wise: `atan(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.atan().print(); // or tf.atan(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function atan_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "atan");
const grad = (dy, saved) => {
const [$x] = saved;
return { $x: () => dy.div($x.toFloat().square().add(1)) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.atan($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes hyperbolic sin of the input `tf.Tensor` element-wise: `sinh(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.sinh().print(); // or tf.sinh(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function sinh_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "sinh");
const grad = (dy, saved) => {
const [$x] = saved;
// tslint:disable-next-line: no-unnecessary-type-assertion
return { $x: () => $x.toFloat().cosh().mul(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.sinh($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes hyperbolic cos of the input `tf.Tensor` element-wise: `cosh(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.cosh().print(); // or tf.cosh(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function cosh_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cosh");
const grad = (dy, saved) => {
const [$x] = saved;
// tslint:disable-next-line: no-unnecessary-type-assertion
return { $x: () => $x.toFloat().sinh().mul(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.cosh($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes hyperbolic tangent of the input `tf.Tensor` element-wise: `tanh(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, 70]);
*
* x.tanh().print(); // or tf.tanh(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function tanh_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tanh");
const grad = (dy, saved) => {
const [y] = saved;
// tslint:disable-next-line: no-unnecessary-type-assertion
return { x: () => Object(tensor_ops["e" /* scalar */])(1).sub(y.square()).mul(dy) };
};
const outputsToSave = [true];
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const y = backend.tanh($x);
save([y]);
return y;
},
{ x: $x },
grad,
"Tanh",
{} /* attrs */,
null /* inputsToSave */,
outputsToSave
);
}
/**
* Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise:
* `asinh(x)`
*
* ```js
* const x = tf.tensor1d([0, 1, -1, .7]);
*
* x.asinh().print(); // or tf.asinh(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function asinh_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "asinh");
const grad = (dy, saved) => {
const [$x] = saved;
return {
$x: () => {
const a = Object(tensor_ops["e" /* scalar */])(1).add($x.toFloat().square()).sqrt();
// tslint:disable-next-line: no-unnecessary-type-assertion
return dy.div(a);
},
};
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.asinh($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise:
* `acosh(x)`
*
* ```js
* const x = tf.tensor1d([10, 1, 3, 5.7]);
*
* x.acosh().print(); // or tf.acosh(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function acosh_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "acosh");
const grad = (dy, saved) => {
const [$x] = saved;
return {
$x: () => {
const a = $x.toFloat().square().sub(1).sqrt();
// tslint:disable-next-line: no-unnecessary-type-assertion
return dy.div(a);
},
};
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.acosh($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise:
* `atanh(x)`
*
* ```js
* const x = tf.tensor1d([0, .1, -.1, .7]);
*
* x.atanh().print(); // or tf.atanh(x)
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function atanh_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "atanh");
const grad = (dy, saved) => {
const [$x] = saved;
return {
$x: () => dy.div(Object(tensor_ops["e" /* scalar */])(1).sub($x.toFloat().square())),
};
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.atanh($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
 * Computes the Gauss error function of the input `tf.Tensor` element-wise:
* `erf(x)`
*
* ```js
* const x = tf.tensor1d([0, .1, -.1, .7]);
*
* x.erf().print(); // or tf.erf(x);
* ```
* @param x The input tensor.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function erf_(x) {
let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "erf");
util["assert"](
$x.dtype === "int32" || $x.dtype === "float32",
() => "Input dtype must be `int32` or `float32`."
);
if ($x.dtype === "int32") {
$x = $x.toFloat();
}
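// d(erf)/dx = (2 / sqrt(pi)) * exp(-x^2).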
const grad = (dy, saved) => {
const [$x] = saved;
return {
$x: () =>
dy.mul(
$x
.square()
.neg()
.exp()
.mul(2 / Math.sqrt(Math.PI))
),
};
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.erf($x);
save([$x]);
return res;
},
{ $x },
grad
);
}
/**
* Computes step of the input `tf.Tensor` element-wise: `x > 0 ? 1 : alpha * x`
*
* ```js
* const x = tf.tensor1d([0, 2, -1, -3]);
*
* x.step(.5).print(); // or tf.step(x, .5)
* ```
* @param x The input tensor.
 * @param alpha The gradient when the input is negative. Defaults to 0.
*/
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function step_(x, alpha = 0.0) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "step");
// TODO(manrajgrover): Return null for gradients when backprop supports
// it.
const grad = (dy) => {
return { $x: () => Object(tensor_ops["o" /* zerosLike */])(dy) };
};
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.step($x, alpha),
{ $x },
grad
);
}
const abs = Object(operation["a" /* op */])({ abs_ });
const acos = Object(operation["a" /* op */])({ acos_ });
const acosh = Object(operation["a" /* op */])({ acosh_ });
const asin = Object(operation["a" /* op */])({ asin_ });
const asinh = Object(operation["a" /* op */])({ asinh_ });
const atan = Object(operation["a" /* op */])({ atan_ });
const atanh = Object(operation["a" /* op */])({ atanh_ });
const ceil = Object(operation["a" /* op */])({ ceil_ });
const clipByValue = Object(operation["a" /* op */])({ clipByValue_ });
const cos = Object(operation["a" /* op */])({ cos_ });
const cosh = Object(operation["a" /* op */])({ cosh_ });
const erf = Object(operation["a" /* op */])({ erf_ });
const unary_ops_exp = Object(operation["a" /* op */])({ exp_ });
const expm1 = Object(operation["a" /* op */])({ expm1_ });
const floor = Object(operation["a" /* op */])({ floor_ });
const log = Object(operation["a" /* op */])({ log_ });
const log1p = Object(operation["a" /* op */])({ log1p_ });
const logSigmoid = Object(operation["a" /* op */])({ logSigmoid_ });
const neg = Object(operation["a" /* op */])({ neg_ });
const reciprocal = Object(operation["a" /* op */])({ reciprocal_ });
const round = Object(operation["a" /* op */])({ round_ });
const rsqrt = Object(operation["a" /* op */])({ rsqrt_ });
const sigmoid = Object(operation["a" /* op */])({ sigmoid_ });
const sign = Object(operation["a" /* op */])({ sign_ });
const unary_ops_isNaN = Object(operation["a" /* op */])({ isNaN_ });
const isInf = Object(operation["a" /* op */])({ isInf_ });
const unary_ops_isFinite = Object(operation["a" /* op */])({ isFinite_ });
const sin = Object(operation["a" /* op */])({ sin_ });
const sinh = Object(operation["a" /* op */])({ sinh_ });
const softplus = Object(operation["a" /* op */])({ softplus_ });
const sqrt = Object(operation["a" /* op */])({ sqrt_ });
const unary_ops_step = Object(operation["a" /* op */])({ step_ });
const tan = Object(operation["a" /* op */])({ tan_ });
const tanh = Object(operation["a" /* op */])({ tanh_ });
//# sourceMappingURL=unary_ops.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Atan2_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const atan2GradConfig = {
kernelName: kernel_names["c" /* Atan2 */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
const d = add(square(a), square(b));
let res = mul(dy, div(b, d));
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(res, a.shape);
};
const derB = () => {
const d = add(square(a), square(b));
let res = neg(mul(dy, div(a, d)));
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(res, b.shape);
};
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Atan2_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js
/**
* @license
* Copyright 2017 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function computePool2DInfo(
inShape,
filterSize,
strides,
dilations,
pad,
roundingMode,
dataFormat = "channelsLast"
) {
const [filterHeight, filterWidth] = parseTupleParam(filterSize);
let filterShape;
if (dataFormat === "channelsLast") {
filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]];
} else if (dataFormat === "channelsFirst") {
filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]];
} else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
return computeConv2DInfo(
inShape,
filterShape,
strides,
dilations,
pad,
roundingMode,
false,
dataFormat
);
}
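/*
 * A worked example of the shape bookkeeping above, assuming direct access to
 * this helper (it is internal to the bundle):
 *
 * ```js
 * // 4x4 NHWC input, 2x2 window, stride 2, no dilation, 'valid' padding
 * const info = computePool2DInfo([1, 4, 4, 1], 2, 2, 1, 'valid');
 * console.log(info.outShape); // [1, 2, 2, 1]
 * ```
 */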
/**
* Computes the information for a forward pass of a pooling3D operation.
*/
function computePool3DInfo(
inShape,
filterSize,
strides,
dilations,
pad,
roundingMode,
dataFormat = "NDHWC"
) {
const [filterDepth, filterHeight, filterWidth] = parse3TupleParam(filterSize);
let filterShape;
let $dataFormat;
if (dataFormat === "NDHWC") {
$dataFormat = "channelsLast";
filterShape = [filterDepth, filterHeight, filterWidth, inShape[4], inShape[4]];
} else if (dataFormat === "NCDHW") {
$dataFormat = "channelsFirst";
filterShape = [filterDepth, filterHeight, filterWidth, inShape[1], inShape[1]];
} else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
return computeConv3DInfo(
inShape,
filterShape,
strides,
dilations,
pad,
false,
$dataFormat,
roundingMode
);
}
/**
* Computes the information for a forward pass of a convolution/pooling
* operation.
*/
function computeConv2DInfo(
inShape,
filterShape,
strides,
dilations,
pad,
roundingMode,
depthwise = false,
dataFormat = "channelsLast"
) {
let [batchSize, inHeight, inWidth, inChannels] = [-1, -1, -1, -1];
if (dataFormat === "channelsLast") {
[batchSize, inHeight, inWidth, inChannels] = inShape;
} else if (dataFormat === "channelsFirst") {
[batchSize, inChannels, inHeight, inWidth] = inShape;
} else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
const [filterHeight, filterWidth, , filterChannels] = filterShape;
const [strideHeight, strideWidth] = parseTupleParam(strides);
const [dilationHeight, dilationWidth] = parseTupleParam(dilations);
const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
const { padInfo, outHeight, outWidth } = getPadAndOutInfo(
pad,
inHeight,
inWidth,
strideHeight,
strideWidth,
effectiveFilterHeight,
effectiveFilterWidth,
roundingMode,
dataFormat
);
const outChannels = depthwise ? filterChannels * inChannels : filterChannels;
let outShape;
if (dataFormat === "channelsFirst") {
outShape = [batchSize, outChannels, outHeight, outWidth];
} else if (dataFormat === "channelsLast") {
outShape = [batchSize, outHeight, outWidth, outChannels];
}
return {
batchSize,
dataFormat,
inHeight,
inWidth,
inChannels,
outHeight,
outWidth,
outChannels,
padInfo,
strideHeight,
strideWidth,
filterHeight,
filterWidth,
effectiveFilterHeight,
effectiveFilterWidth,
dilationHeight,
dilationWidth,
inShape,
outShape,
filterShape,
};
}
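/*
 * A worked example, assuming direct access to this internal helper:
 *
 * ```js
 * // 4x4 single-channel input, 2x2 filter with 3 output channels,
 * // stride 1, 'valid' padding
 * const info = computeConv2DInfo([1, 4, 4, 1], [2, 2, 1, 3], 1, 1, 'valid');
 * console.log(info.outShape); // [1, 3, 3, 3], since ceil((4 - 2 + 1) / 1) = 3
 * ```
 */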
/**
* Computes the information for a forward pass of a 3D convolution/pooling
* operation.
*/
function computeConv3DInfo(
inShape,
filterShape,
strides,
dilations,
pad,
depthwise = false,
dataFormat = "channelsLast",
roundingMode
) {
let [batchSize, inDepth, inHeight, inWidth, inChannels] = [-1, -1, -1, -1, -1];
if (dataFormat === "channelsLast") {
[batchSize, inDepth, inHeight, inWidth, inChannels] = inShape;
} else if (dataFormat === "channelsFirst") {
[batchSize, inChannels, inDepth, inHeight, inWidth] = inShape;
} else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
const [filterDepth, filterHeight, filterWidth, , filterChannels] = filterShape;
const [strideDepth, strideHeight, strideWidth] = parse3TupleParam(strides);
const [dilationDepth, dilationHeight, dilationWidth] = parse3TupleParam(dilations);
const effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth);
const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
const { padInfo, outDepth, outHeight, outWidth } = get3DPadAndOutInfo(
pad,
inDepth,
inHeight,
inWidth,
strideDepth,
strideHeight,
strideWidth,
effectiveFilterDepth,
effectiveFilterHeight,
effectiveFilterWidth,
roundingMode
);
const outChannels = depthwise ? filterChannels * inChannels : filterChannels;
let outShape;
if (dataFormat === "channelsFirst") {
outShape = [batchSize, outChannels, outDepth, outHeight, outWidth];
} else if (dataFormat === "channelsLast") {
outShape = [batchSize, outDepth, outHeight, outWidth, outChannels];
}
return {
batchSize,
dataFormat,
inDepth,
inHeight,
inWidth,
inChannels,
outDepth,
outHeight,
outWidth,
outChannels,
padInfo,
strideDepth,
strideHeight,
strideWidth,
filterDepth,
filterHeight,
filterWidth,
effectiveFilterDepth,
effectiveFilterHeight,
effectiveFilterWidth,
dilationDepth,
dilationHeight,
dilationWidth,
inShape,
outShape,
filterShape,
};
}
function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) {
if (zeroPad == null) {
zeroPad = computeDefaultPad(inShape, fieldSize, stride);
}
const inputRows = inShape[0];
const inputCols = inShape[1];
const outputRows = conditionalRound(
(inputRows - fieldSize + 2 * zeroPad) / stride + 1,
roundingMode
);
util["assert"](
util["isInt"](outputRows),
() =>
`The output # of rows (${outputRows}) must be an integer. ` +
`Change the stride and/or zero pad parameters`
);
const outputCols = conditionalRound(
(inputCols - fieldSize + 2 * zeroPad) / stride + 1,
roundingMode
);
util["assert"](
util["isInt"](outputCols),
() =>
`The output # of columns (${outputCols}) must be an integer. ` +
`Change the stride and/or zero pad parameters`
);
return [outputRows, outputCols];
}
function computeOutputShape4D(
inShape,
fieldSize,
outChannels,
stride,
zeroPad,
roundingMode
) {
if (zeroPad == null) {
zeroPad = computeDefaultPad(inShape, fieldSize, stride);
}
const inputDepth = inShape[0];
const inputRows = inShape[1];
const inputCols = inShape[2];
const outputDepths = conditionalRound(
(inputDepth - fieldSize + 2 * zeroPad) / stride + 1,
roundingMode
);
util["assert"](
util["isInt"](outputDepths),
() =>
`The output # of depths (${outputDepths}) must be an integer. ` +
`Change the stride and/or zero pad parameters`
);
const outputRows = conditionalRound(
(inputRows - fieldSize + 2 * zeroPad) / stride + 1,
roundingMode
);
util["assert"](
util["isInt"](outputRows),
() =>
`The output # of rows (${outputRows}) must be an integer. ` +
`Change the stride and/or zero pad parameters`
);
const outputCols = conditionalRound(
(inputCols - fieldSize + 2 * zeroPad) / stride + 1,
roundingMode
);
util["assert"](
util["isInt"](outputCols),
() =>
`The output # of columns (${outputCols}) must be an integer. ` +
`Change the stride and/or zero pad parameters`
);
return [outputDepths, outputRows, outputCols, outChannels];
}
function computeDefaultPad(inputShape, fieldSize, stride, dilation = 1) {
const effectiveFieldSize = getEffectiveFilterSize(fieldSize, dilation);
return Math.floor((inputShape[0] * (stride - 1) - stride + effectiveFieldSize) / 2);
}
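/*
 * A minimal sketch of the formula above: for a stride-1 3x3 filter over a
 * 5x5 input, 'same' output needs floor((5 * 0 - 1 + 3) / 2) = 1 pixel of
 * padding per side (assuming direct access to this internal helper):
 *
 * ```js
 * computeDefaultPad([5, 5], 3, 1); // 1
 * ```
 */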
function parseTupleParam(param) {
if (typeof param === "number") {
return [param, param, param];
}
if (param.length === 2) {
return [param[0], param[1], 1];
}
return param;
}
function parse3TupleParam(param) {
return typeof param === "number" ? [param, param, param] : param;
}
/* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d
* Atrous convolution is equivalent to standard convolution with upsampled
* filters with effective_filter_height =
* filter_height + (filter_height - 1) * (dilation - 1)
* and effective_filter_width =
* filter_width + (filter_width - 1) * (dilation - 1),
* produced by inserting dilation - 1 zeros along consecutive elements across
* the filters' spatial dimensions.
* When there is a dilation, this converts a filter dimension to the
* effective filter dimension, so it can be used in a standard convolution.
*/
function getEffectiveFilterSize(filterSize, dilation) {
if (dilation <= 1) {
return filterSize;
}
return filterSize + (filterSize - 1) * (dilation - 1);
}
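/*
 * A worked example of the comment above: a 3-tap filter with dilation 2 has
 * one zero inserted between taps, so it spans 3 + (3 - 1) * (2 - 1) = 5
 * input elements:
 *
 * ```js
 * getEffectiveFilterSize(3, 2); // 5
 * getEffectiveFilterSize(3, 1); // 3 (dilation <= 1 leaves the size unchanged)
 * ```
 */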
function getPadAndOutInfo(
pad,
inHeight,
inWidth,
strideHeight,
strideWidth,
filterHeight,
filterWidth,
roundingMode,
dataFormat
) {
let padInfo;
let outHeight;
let outWidth;
if (typeof pad === "number") {
const padType = pad === 0 ? "VALID" : "NUMBER";
padInfo = { top: pad, bottom: pad, left: pad, right: pad, type: padType };
const outShape = computeOutputShape2D(
[inHeight, inWidth],
filterHeight,
strideHeight,
pad,
roundingMode
);
outHeight = outShape[0];
outWidth = outShape[1];
} else if (pad === "same") {
outHeight = Math.ceil(inHeight / strideHeight);
outWidth = Math.ceil(inWidth / strideWidth);
const padAlongHeight = Math.max(
0,
(outHeight - 1) * strideHeight + filterHeight - inHeight
);
const padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);
const top = Math.floor(padAlongHeight / 2);
const bottom = padAlongHeight - top;
const left = Math.floor(padAlongWidth / 2);
const right = padAlongWidth - left;
padInfo = { top, bottom, left, right, type: "SAME" };
} else if (pad === "valid") {
padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: "VALID" };
outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
} else if (typeof pad === "object") {
const top = dataFormat === "channelsLast" ? pad[1][0] : pad[2][0];
const bottom = dataFormat === "channelsLast" ? pad[1][1] : pad[2][1];
const left = dataFormat === "channelsLast" ? pad[2][0] : pad[3][0];
const right = dataFormat === "channelsLast" ? pad[2][1] : pad[3][1];
const padType =
top === 0 && bottom === 0 && left === 0 && right === 0 ? "VALID" : "EXPLICIT";
padInfo = { top, bottom, left, right, type: padType };
outHeight = conditionalRound(
(inHeight - filterHeight + top + bottom) / strideHeight + 1,
roundingMode
);
outWidth = conditionalRound(
(inWidth - filterWidth + left + right) / strideWidth + 1,
roundingMode
);
} else {
throw Error(`Unknown padding parameter: ${pad}`);
}
return { padInfo, outHeight, outWidth };
}
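/*
 * A worked example of the 'same' branch, assuming direct access to this
 * internal helper: a 5-pixel side with stride 2 and a 3-tap filter gives
 * out = ceil(5 / 2) = 3 and total padding (3 - 1) * 2 + 3 - 5 = 2,
 * split as top = 1, bottom = 1:
 *
 * ```js
 * const { outHeight, padInfo } =
 *   getPadAndOutInfo('same', 5, 5, 2, 2, 3, 3, undefined, 'channelsLast');
 * console.log(outHeight, padInfo.top, padInfo.bottom); // 3 1 1
 * ```
 */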
function get3DPadAndOutInfo(
pad,
inDepth,
inHeight,
inWidth,
strideDepth,
strideHeight,
strideWidth,
filterDepth,
filterHeight,
filterWidth,
roundingMode
) {
let padInfo;
let outDepth;
let outHeight;
let outWidth;
if (typeof pad === "number") {
const padType = pad === 0 ? "VALID" : "NUMBER";
padInfo = {
top: pad,
bottom: pad,
left: pad,
right: pad,
front: pad,
back: pad,
type: padType,
};
const outShape = computeOutputShape4D(
[inDepth, inHeight, inWidth, 1],
filterDepth,
1,
strideDepth,
pad,
roundingMode
);
outDepth = outShape[0];
outHeight = outShape[1];
outWidth = outShape[2];
} else if (pad === "same") {
outDepth = Math.ceil(inDepth / strideDepth);
outHeight = Math.ceil(inHeight / strideHeight);
outWidth = Math.ceil(inWidth / strideWidth);
const padAlongDepth = (outDepth - 1) * strideDepth + filterDepth - inDepth;
const padAlongHeight = (outHeight - 1) * strideHeight + filterHeight - inHeight;
const padAlongWidth = (outWidth - 1) * strideWidth + filterWidth - inWidth;
const front = Math.floor(padAlongDepth / 2);
const back = padAlongDepth - front;
const top = Math.floor(padAlongHeight / 2);
const bottom = padAlongHeight - top;
const left = Math.floor(padAlongWidth / 2);
const right = padAlongWidth - left;
padInfo = { top, bottom, left, right, front, back, type: "SAME" };
} else if (pad === "valid") {
padInfo = {
top: 0,
bottom: 0,
left: 0,
right: 0,
front: 0,
back: 0,
type: "VALID",
};
outDepth = Math.ceil((inDepth - filterDepth + 1) / strideDepth);
outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
} else {
throw Error(`Unknown padding parameter: ${pad}`);
}
return { padInfo, outDepth, outHeight, outWidth };
}
/**
* Rounds a value depending on the rounding mode
* @param value
* @param roundingMode
*/
function conditionalRound(value, roundingMode) {
if (!roundingMode) {
return value;
}
switch (roundingMode) {
case "round":
// used for Caffe Conv
return Math.round(value);
case "ceil":
// used for Caffe Pool
return Math.ceil(value);
case "floor":
return Math.floor(value);
default:
throw new Error(`Unknown roundingMode ${roundingMode}`);
}
}
function tupleValuesAreOne(param) {
const [dimA, dimB, dimC] = parseTupleParam(param);
return dimA === 1 && dimB === 1 && dimC === 1;
}
function eitherStridesOrDilationsAreOne(strides, dilations) {
return tupleValuesAreOne(strides) || tupleValuesAreOne(dilations);
}
/**
* Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
* 'channelsLast'|'channelsFirst'
* @param dataFormat in 'NHWC'|'NCHW' mode
* @return dataFormat in 'channelsLast'|'channelsFirst' mode
* @throws unknown dataFormat
*/
function convertConv2DDataFormat(dataFormat) {
if (dataFormat === "NHWC") {
return "channelsLast";
} else if (dataFormat === "NCHW") {
return "channelsFirst";
} else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
}
//# sourceMappingURL=conv_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d_backprop.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Computes the backprop of a 3D avg pool.
 *
 * @param dy The dy error, of rank 5 or rank 4 of shape
 * [batchSize, depth, height, width, channels]. If rank 4, batch of 1 is
 * assumed.
 * @param input The original input image, of rank 5 or rank 4 of shape
 * [batchSize, depth, height, width, channels].
 * @param filterSize The filter size:
 * `[filterDepth, filterHeight, filterWidth]`. If
 * `filterSize` is a single number,
 * then `filterDepth == filterHeight == filterWidth`.
* @param strides The strides of the pooling:
* `[strideDepth, strideHeight, strideWidth]`. If
 * `strides` is a single number, then `strideDepth == strideHeight == strideWidth`.
* @param dilations Deprecated, this field will be gone in v3.0.0. The dilation
* rates: `[dilationDepth, dilationHeight, dilationWidth]`
* in which we sample input values across the depth, height and width
* dimensions in dilated pooling.
* Defaults to `[1, 1, 1]`. If `dilations` is a single number,
* then `dilationDepth == dilationHeight == dilationWidth`.
* If it is greater than 1, then all values of `strides` must be 1.
* @param pad A string from: 'same', 'valid'. The type of padding algorithm
* used in the forward prop of the op.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
* rounding mode used when computing output dimensions if pad is a
* number. If none is provided, it will not round and error if the output
* is of fractional size.
*/
function avgPool3dBackprop_(
dy,
input,
filterSize,
strides,
dilations = [1, 1, 1],
pad,
dimRoundingMode
) {
const $dy = Object(tensor_util_env["a" /* convertToTensor */])(
dy,
"dy",
"avgPool3dBackprop"
);
const $input = Object(tensor_util_env["a" /* convertToTensor */])(
input,
"input",
"avgPool3dBackprop"
);
let dy5D = $dy;
let input5D = $input;
let reshapedTo5D = false;
if ($input.rank === 4) {
reshapedTo5D = true;
dy5D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]);
input5D = reshape($input, [
1,
$input.shape[0],
$input.shape[1],
$input.shape[2],
$input.shape[3],
]);
}
util["assert"](
dy5D.rank === 5,
() => `Error in avgPool3dBackprop: dy must be rank 5 but got rank ` + `${dy5D.rank}.`
);
util["assert"](
input5D.rank === 5,
() =>
`Error in avgPool3dBackprop: input must be rank 5 but got rank ` + `${input5D.rank}.`
);
util["assert"](
eitherStridesOrDilationsAreOne(strides, dilations),
() =>
"Error in avgPool3dBackprop: Either strides or dilations " +
`must be 1. Got strides ${strides} and dilations '${dilations}'`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
        `Error in avgPool3dBackprop: pad must be an integer when using ` +
        `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
);
}
const forward = (backend) => {
const convInfo = computePool3DInfo(
input5D.shape,
filterSize,
strides,
dilations,
pad,
dimRoundingMode
);
return backend.avgPool3dBackprop(dy5D, input5D, convInfo);
};
const inputs = { dy: dy5D, input: input5D };
const attrs = { filterSize, strides, dilations, pad, dimRoundingMode };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["f" /* AvgPool3DBackprop */],
attrs
);
if (reshapedTo5D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
}
return res;
}
const avgPool3dBackprop = Object(operation["a" /* op */])({ avgPool3dBackprop_ });
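/*
 * A hedged usage sketch: end users normally reach this backprop through
 * autodiff (via the AvgPool3D gradient config below) rather than directly,
 * assuming the assembled public `tf` namespace:
 *
 * ```js
 * const g = tf.grad((x) => tf.avgPool3d(x, 2, 2, 'valid'));
 * g(tf.ones([1, 2, 2, 2, 1])).print(); // each input receives dy / 8 = 0.125
 * ```
 */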
//# sourceMappingURL=avg_pool_3d_backprop.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool3D_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const avgPool3DGradConfig = {
kernelName: kernel_names["e" /* AvgPool3D */],
inputsToSave: ["x"],
gradFunc: (dy, saved, attrs) => {
const [x] = saved;
const { filterSize, strides, dilations, pad, dimRoundingMode } = attrs;
const $dilations = dilations == null ? [1, 1, 1] : dilations;
return {
x: () =>
avgPool3dBackprop(dy, x, filterSize, strides, $dilations, pad, dimRoundingMode),
};
},
};
//# sourceMappingURL=AvgPool3D_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_backprop.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Computes the backprop of a 2D avg pool.
*
* @param dy The dy error, of rank 4 or rank 3 of shape
* [batchSize, height, width, channels]. If rank 3, batch of 1 is
* assumed.
* @param input The input image, of rank 4 or rank 3 of shape
* [batchSize, height, width, channels]. If rank 3, batch of 1 is
* assumed.
* @param filterSize The filter size: `[filterHeight, filterWidth]`. If
* `filterSize` is a single number, then `filterHeight == filterWidth`.
* @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
* `strides` is a single number, then `strideHeight == strideWidth`.
* @param pad A string from: 'same', 'valid'. The type of padding algorithm
* used in the forward prop of the op.
*/
function avgPoolBackprop_(dy, input, filterSize, strides, pad) {
const $dy = Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "avgPoolBackprop");
const $input = Object(tensor_util_env["a" /* convertToTensor */])(
input,
"input",
"avgPoolBackprop"
);
util["assert"](
$input.rank === $dy.rank,
() => `Rank of input (${$input.rank}) does not match rank of dy (${$dy.rank})`
);
let input4D = $input;
let dy4D = $dy;
let reshapedTo4D = false;
if ($input.rank === 3) {
reshapedTo4D = true;
input4D = reshape($input, [1, $input.shape[0], $input.shape[1], $input.shape[2]]);
dy4D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2]]);
}
util["assert"](
dy4D.rank === 4,
() => `Error in avgPoolBackprop: dy must be rank 4 but got rank ` + `${dy4D.rank}.`
);
util["assert"](
input4D.rank === 4,
() => `Error in avgPoolBackprop: input must be rank 4 but got rank ` + `${input4D.rank}.`
);
const forward = (backend) => {
const convInfo = computePool2DInfo(
input4D.shape,
filterSize,
strides,
1 /* dilations */,
pad
);
return backend.avgPoolBackprop(dy4D, input4D, convInfo);
};
const inputs = { dy: dy4D, input: input4D };
const attrs = { filterSize, strides, pad };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["g" /* AvgPoolBackprop */],
attrs
);
if (reshapedTo4D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
}
return res;
}
const avgPoolBackprop = Object(operation["a" /* op */])({ avgPoolBackprop_ });
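/*
 * As in the 3D case, a hedged end-user equivalent via autodiff, assuming the
 * assembled public `tf` namespace:
 *
 * ```js
 * const g = tf.grad((x) => tf.avgPool(x, 2, 2, 'valid'));
 * g(tf.ones([1, 2, 2, 1])).print(); // each input receives dy / 4 = 0.25
 * ```
 */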
//# sourceMappingURL=avg_pool_backprop.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const avgPoolGradConfig = {
kernelName: kernel_names["d" /* AvgPool */],
inputsToSave: ["x"],
gradFunc: (dy, saved, attrs) => {
const [x] = saved;
const { filterSize, strides, pad } = attrs;
return {
x: () => avgPoolBackprop(dy, x, filterSize, strides, pad),
};
},
};
//# sourceMappingURL=AvgPool_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the dot product of two matrices, A * B. These must be matrices.
*
* ```js
* const a = tf.tensor2d([1, 2], [1, 2]);
* const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* a.matMul(b).print(); // or tf.matMul(a, b)
* ```
* @param a First matrix in dot product operation.
* @param b Second matrix in dot product operation.
* @param transposeA If true, `a` is transposed before multiplication.
* @param transposeB If true, `b` is transposed before multiplication.
*/
/** @doc {heading: 'Operations', subheading: 'Matrices'} */
function matMul_(a, b, transposeA = false, transposeB = false) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "matMul");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "matMul");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
util["assert"](
$a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank,
() =>
`Error in matMul: inputs must have the same rank of at least 2, ` +
`got ranks ${$a.rank} and ${$b.rank}.`
);
const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];
const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];
const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];
const outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];
const outerDimsA = $a.shape.slice(0, -2);
const outerDimsB = $b.shape.slice(0, -2);
const batchDimA = util["sizeFromShape"](outerDimsA);
const batchDimB = util["sizeFromShape"](outerDimsB);
util["assert"](
util["arraysEqual"](outerDimsA, outerDimsB),
() =>
`Error in matMul: outer dimensions (${outerDimsA}) and (` +
`${outerDimsB}) of Tensors with shapes ${$a.shape} and ` +
`${$b.shape} must match.`
);
util["assert"](
innerShapeA === innerShapeB,
() =>
`Error in matMul: inner shapes (${innerShapeA}) and (` +
`${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +
`${$b.shape} and transposeA=${transposeA}` +
` and transposeB=${transposeB} must match.`
);
const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]);
const a3D = transposeA
? reshape($a, [batchDimA, innerShapeA, outerShapeA])
: reshape($a, [batchDimA, outerShapeA, innerShapeA]);
const b3D = transposeB
? reshape($b, [batchDimB, outerShapeB, innerShapeB])
: reshape($b, [batchDimB, innerShapeB, outerShapeB]);
const forward = (backend, save) => {
save([a3D, b3D]);
return backend.batchMatMul(a3D, b3D, transposeA, transposeB);
};
const inputs = { a: a3D, b: b3D };
const attrs = { transposeA, transposeB };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["h" /* BatchMatMul */],
attrs
);
return reshape(res, outShape);
}
const matMul = Object(operation["a" /* op */])({ matMul_ });
//# sourceMappingURL=mat_mul.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/BatchMatMul_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const batchMatMulGradConfig = {
kernelName: kernel_names["h" /* BatchMatMul */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved, attrs) => {
const [a, b] = saved;
const { transposeA, transposeB } = attrs;
if (!transposeA && !transposeB) {
return {
a: () => matMul(dy, b, false, true),
b: () => matMul(a, dy, true, false),
};
} else if (!transposeA && transposeB) {
return {
a: () => matMul(dy, b, false, false),
b: () => matMul(dy, a, true, false),
};
} else if (transposeA && !transposeB) {
return {
a: () => matMul(b, dy, false, true),
b: () => matMul(a, dy, false, false),
};
} else {
return {
a: () => matMul(b, dy, true, true),
b: () => matMul(dy, a, true, true),
};
}
},
};
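/*
 * The four branches above encode the standard matmul gradient rules: for
 * C = A x B with no transposes, dA = dy x B^T and dB = A^T x dy; the other
 * cases follow by pushing the transpose flags into the recursive matMul
 * calls. A quick shape check, assuming the assembled public `tf` namespace:
 *
 * ```js
 * const f = (a, b) => tf.matMul(a, b);
 * const [da, db] = tf.grads(f)([tf.ones([2, 3]), tf.ones([3, 4])]);
 * console.log(da.shape, db.shape); // [2, 3] [3, 4] -- gradients match inputs
 * ```
 */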
//# sourceMappingURL=BatchMatMul_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* This operation divides "spatial" dimensions `[1, ..., M]` of the input into
* a grid of blocks of shape `blockShape`, and interleaves these blocks with
* the "batch" dimension (0) such that in the output, the spatial
* dimensions `[1, ..., M]` correspond to the position within the grid,
* and the batch dimension combines both the position within a spatial block
* and the original batch position. Prior to division into blocks,
* the spatial dimensions of the input are optionally zero padded
* according to `paddings`. See below for a precise description.
*
* ```js
* const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
* const blockShape = [2, 2];
* const paddings = [[0, 0], [0, 0]];
*
* x.spaceToBatchND(blockShape, paddings).print();
* ```
*
* @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
* remainingShape`, where spatialShape has `M` dimensions.
* @param blockShape A 1-D array. Must have shape `[M]`, all values must
* be >= 1.
* @param paddings A 2-D array. Must have shape `[M, 2]`, all values must be >=
* 0. `paddings[i] = [padStart, padEnd]` specifies the amount to zero-pad
* from input dimension `i + 1`, which corresponds to spatial dimension `i`. It
* is required that
* `(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0`
*
* This operation is equivalent to the following steps:
*
* 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input
* according to `paddings` to produce `padded` of shape paddedShape.
*
* 2. Reshape `padded` to `reshapedPadded` of shape:
* `[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ...,
* paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape`
*
* 3. Permute dimensions of `reshapedPadded` to produce `permutedReshapedPadded`
* of shape: `blockShape + [batch] + [paddedShape[1] / blockShape[0], ...,
* paddedShape[M] / blockShape[M-1]] + remainingShape`
*
* 4. Reshape `permutedReshapedPadded` to flatten `blockShape` into the
* batch dimension, producing an output tensor of shape:
* `[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ...,
* paddedShape[M] / blockShape[M-1]] + remainingShape`
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function spaceToBatchND_(x, blockShape, paddings) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "spaceToBatchND");
util["assert"](
$x.rank >= 1 + blockShape.length,
    () => `input rank ${$x.rank} should be greater than blockShape.length ${blockShape.length}`
);
util["assert"](
paddings.length === blockShape.length,
() =>
      `paddings.length (${paddings.length}) must equal blockShape.length (${blockShape.length})`
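/*
 * Editor's aside on the Atan2 gradient config above: it implements
 * d/da atan2(a, b) = b / (a^2 + b^2) and d/db atan2(a, b) = -a / (a^2 + b^2),
 * then sums over any broadcast axes so each gradient matches its input's
 * shape. A minimal check, assuming the assembled public `tf` namespace:
 *
 * ```js
 * const da = tf.grad((a) => tf.atan2(a, tf.scalar(1)));
 * da(tf.scalar(1)).print(); // 1 / (1^2 + 1^2) = 0.5
 * ```
 */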
);
util["assert"](
$x.shape.reduce((a, b, i) => {
if (i > 0 && i <= blockShape.length) {
return a && (b + paddings[i - 1][0] + paddings[i - 1][1]) % blockShape[i - 1] === 0;
}
return a;
}, true),
() =>
`input spatial dimensions ${$x.shape.slice(
1
)} with paddings ${paddings.toString()} must be divisible by blockShapes ${blockShape.toString()}`
);
const forward = (backend) => backend.spaceToBatchND($x, blockShape, paddings);
const inputs = { x: $x };
const attrs = { blockShape, paddings };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["mb" /* SpaceToBatchND */],
attrs
);
}
const spaceToBatchND = Object(operation["a" /* op */])({ spaceToBatchND_ });
//# sourceMappingURL=space_to_batch_nd.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/BatchToSpaceND_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const batchToSpaceNDGradConfig = {
kernelName: kernel_names["i" /* BatchToSpaceND */],
gradFunc: (dy, saved, attrs) => {
const { blockShape, crops } = attrs;
return { x: () => spaceToBatchND(dy, blockShape, crops) };
},
};
//# sourceMappingURL=BatchToSpaceND_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/BroadcastTo_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const broadcastToGradConfig = {
kernelName: kernel_names["j" /* BroadcastTo */],
gradFunc: (dy, saved, attrs) => {
const broadCastToAttrs = attrs;
const inputShape = broadCastToAttrs.inputShape;
const outputShape = broadCastToAttrs.shape;
const reps = Array.from(outputShape);
for (let i = inputShape.length - 1; i >= 0; i--) {
if (inputShape[i] === outputShape[i]) {
reps[i] = 1;
} else if (inputShape[i] !== 1) {
throw new Error(
`broadcastTo(): [${inputShape}] cannot be broadcast to [${outputShape}].`
);
}
}
const axes = [];
for (let i = 0; i < reps.length; i++) {
if (reps[i] > 1) {
axes.push(i);
}
}
return { x: () => sum(dy, axes, true /* keepDims */) };
},
};
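/*
 * Broadcasting replicates the input along its size-1 axes, so the gradient
 * sums dy back over exactly those axes (keepDims preserves the rank). A
 * minimal sketch, assuming the assembled public `tf` namespace:
 *
 * ```js
 * const g = tf.grad((x) => tf.broadcastTo(x, [2, 3]));
 * g(tf.ones([2, 1])).print(); // [[3], [3]] -- dy summed over the tiled axis
 * ```
 */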
//# sourceMappingURL=BroadcastTo_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/split.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Splits a `tf.Tensor` into sub tensors.
*
* If `numOrSizeSplits` is a number, splits `x` along dimension `axis`
* into `numOrSizeSplits` smaller tensors.
* Requires that `numOrSizeSplits` evenly divides `x.shape[axis]`.
*
* If `numOrSizeSplits` is a number array, splits `x` into
* `numOrSizeSplits.length` pieces. The shape of the `i`-th piece has the
* same size as `x` except along dimension `axis` where the size is
* `numOrSizeSplits[i]`.
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [2, 4]);
* const [a, b] = tf.split(x, 2, 1);
* a.print();
* b.print();
*
* const [c, d, e] = tf.split(x, [1, 2, 1], 1);
* c.print();
* d.print();
* e.print();
* ```
*
* @param x The input tensor to split.
* @param numOrSizeSplits Either an integer indicating the number of
* splits along the axis or an array of integers containing the sizes of
* each output tensor along the axis. If a number then it must evenly divide
* `x.shape[axis]`; otherwise the sum of sizes must match `x.shape[axis]`.
* @param axis The dimension along which to split. Defaults to 0 (the first
* dim).
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
function split_(x, numOrSizeSplits, axis = 0) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "split");
const $axis = Object(util["parseAxisParam"])(axis, $x.shape)[0];
let splitSizes;
if (typeof numOrSizeSplits === "number") {
Object(util["assert"])(
$x.shape[$axis] % numOrSizeSplits === 0,
() => "Number of splits must evenly divide the axis."
);
splitSizes = new Array(numOrSizeSplits).fill($x.shape[$axis] / numOrSizeSplits);
} else {
Object(util["assert"])(
$x.shape[$axis] === numOrSizeSplits.reduce((a, b) => a + b),
() => "The sum of sizes must match the size of the axis dimension."
);
splitSizes = numOrSizeSplits;
}
const forward = (backend, _) => {
return backend.split($x, splitSizes, $axis);
};
const inputs = { x: $x };
const attr = { numOrSizeSplits, axis };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["nb" /* SplitV */],
attr
);
}
const split = Object(operation["a" /* op */])({ split_ });
//# sourceMappingURL=split.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Concat_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const concatGradConfig = {
kernelName: kernel_names["l" /* Concat */],
saveAllInputs: true,
gradFunc: (dy, saved, attrs) => {
const shapes = saved.map((t) => t.shape);
const { axis } = attrs;
const $axis = Object(util["parseAxisParam"])(axis, saved[0].shape)[0];
const sizeSplits = shapes.map((s) => s[$axis]);
const derTensors = split(dy, sizeSplits, $axis);
return derTensors.map((t) => () => t);
},
};
//# sourceMappingURL=Concat_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the derivative of the filter of a 2D convolution.
*
* @param x The input tensor, of rank 4 or rank 3 of shape
* [batch, height, width, inChannels]. If rank 3, batch of 1 is assumed.
* @param dy The dy image, of rank 4 or rank 3, of shape
* [batch, height, width, outDepth]. If rank 3, batch of 1 is assumed.
* @param filterShape The shape of the filter, length 4,
* [filterHeight, filterWidth, inDepth, outDepth].
* @param strides The strides of the convolution: [strideHeight,
* strideWidth].
* @param pad A string from: 'same', 'valid'. The type of padding algorithm
* used in the forward prop of the op.
* @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels].
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
* rounding mode used when computing output dimensions if pad is a
* number. If none is provided, it will not round and error if the output
* is of fractional size.
*/
function conv2DBackpropFilter_(
x,
dy,
filterShape,
strides,
pad,
dataFormat = "NHWC",
dimRoundingMode
) {
let x4D = x;
if (x.rank === 3) {
x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
}
let dy4D = dy;
if (dy4D.rank === 3) {
dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
}
util["assert"](
x4D.rank === 4,
() => `Error in conv2dDerFilter: input must be rank 4, but got shape ` + `${x4D.shape}.`
);
util["assert"](
dy4D.rank === 4,
() => `Error in conv2dDerFilter: dy must be rank 4, but got shape ` + `${dy4D.shape}.`
);
util["assert"](
filterShape.length === 4,
() =>
`Error in conv2dDerFilter: filterShape must be length 4, but got ` + `${filterShape}.`
);
const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1];
const outDepth = dataFormat === "NHWC" ? dy4D.shape[3] : dy4D.shape[1];
util["assert"](
inDepth === filterShape[2],
() =>
      `Error in conv2dDerFilter: depth of input (${inDepth}) must ` +
      `match input depth in filter (${filterShape[2]}).`
);
util["assert"](
outDepth === filterShape[3],
() =>
`Error in conv2dDerFilter: depth of dy (${outDepth}) must ` +
`match output depth for filter (${filterShape[3]}).`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
        `Error in conv2dDerFilter: pad must be an integer when using ` +
        `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
);
}
const forward = (backend) => {
const dilations = 1;
const $dataFormat = convertConv2DDataFormat(dataFormat);
const convInfo = computeConv2DInfo(
x4D.shape,
filterShape,
strides,
dilations,
pad,
dimRoundingMode,
false,
$dataFormat
);
return backend.conv2dDerFilter(x4D, dy4D, convInfo);
};
const inputs = { x: x4D, dy: dy4D };
const attrs = { strides, pad, dataFormat, dimRoundingMode };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["n" /* Conv2DBackpropFilter */],
attrs
);
}
const conv2DBackpropFilter = Object(operation["a" /* op */])({ conv2DBackpropFilter_ });
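/*
 * A hedged usage sketch: this derivative surfaces through autodiff as the
 * filter gradient of conv2d, assuming the assembled public `tf` namespace:
 *
 * ```js
 * const f = (x, w) => tf.conv2d(x, w, 1, 'valid');
 * const [dx, dw] = tf.grads(f)([tf.ones([1, 3, 3, 1]), tf.ones([2, 2, 1, 1])]);
 * console.log(dw.shape); // [2, 2, 1, 1] -- same shape as the filter
 * ```
 */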
//# sourceMappingURL=conv2d_backprop_filter.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the derivative of the input of a 2D convolution.
*
* @param xShape The shape of the input: [batch, height, width, inDepth].
 * If of length 3, a batch of 1 is assumed.
* @param dy The derivative of the output, of rank 4 or rank 3 of shape
* `[batch, outHeight, outWidth, outDepth]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter, rank 4, of shape
* `[filterHeight, filterWidth, inDepth, outDepth]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`.
* @param pad The type of padding algorithm used:
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels].
* @param dimRoundingMode The rounding mode used when computing output
* dimensions if pad is a number. If none is provided, it will not round
* and error if the output is of fractional size.
*/
function conv2DBackpropInput_(
xShape,
dy,
filter,
strides,
pad,
dataFormat = "NHWC",
dimRoundingMode
) {
util["assert"](
xShape.length === dy.rank,
() => `Length of inShape ` + `(${xShape.length}) and rank of dy (${dy.rank}) must match`
);
let xShape4D = xShape;
let dy4D = dy;
let reshapedTo4D = false;
if (dy.rank === 3) {
reshapedTo4D = true;
dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
xShape4D = [1, xShape[0], xShape[1], xShape[2]];
}
util["assert"](
xShape4D.length === 4,
() =>
`Error in conv2dDerInput: inShape must be length 4, but got length ` +
`${xShape4D.length}.`
);
util["assert"](
dy4D.rank === 4,
() => `Error in conv2dDerInput: dy must be rank 4, but got ` + `rank ${dy4D.rank}`
);
util["assert"](
filter.rank === 4,
() => `Error in conv2dDerInput: filter must be rank 4, but got ` + `rank ${filter.rank}`
);
const inDepth = dataFormat === "NHWC" ? xShape4D[3] : xShape4D[1];
const outDepth = dataFormat === "NHWC" ? dy4D.shape[3] : dy4D.shape[1];
util["assert"](
inDepth === filter.shape[2],
() =>
`Error in conv2dDerInput: depth of input (${inDepth}) must ` +
`match input depth for filter ${filter.shape[2]}.`
);
util["assert"](
outDepth === filter.shape[3],
() =>
`Error in conv2dDerInput: depth of output (${outDepth}) must ` +
`match output depth for filter ${filter.shape[3]}.`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
        `Error in conv2dDerInput: pad must be an integer when using ` +
        `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
);
}
const forward = (backend, save) => {
const dilations = 1;
const $dataFormat = convertConv2DDataFormat(dataFormat);
const convInfo = computeConv2DInfo(
xShape4D,
filter.shape,
strides,
dilations,
pad,
dimRoundingMode,
false,
$dataFormat
);
const res = backend.conv2dDerInput(dy4D, filter, convInfo);
save([dy4D, filter]);
return res;
};
const inputs = { dy: dy4D, filter };
const attrs = { strides, pad, dataFormat, dimRoundingMode };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["o" /* Conv2DBackpropInput */],
attrs
);
if (reshapedTo4D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
}
return res;
}
const conv2DBackpropInput = Object(operation["a" /* op */])({ conv2DBackpropInput_ });
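/*
 * Besides serving as conv2d's input gradient, this op also underlies
 * transposed convolution, which upsamples by running the convolution
 * backward. A minimal sketch, assuming direct access to this helper:
 *
 * ```js
 * const dy = tf.ones([1, 2, 2, 1]); // a 2x2 'valid' conv output
 * const w = tf.ones([2, 2, 1, 1]);
 * // recover a gradient with the original 3x3 input shape
 * conv2DBackpropInput([1, 3, 3, 1], dy, w, 1, 'valid').print();
 * ```
 */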
//# sourceMappingURL=conv2d_backprop_input.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2D_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const conv2DGradConfig = {
kernelName: kernel_names["m" /* Conv2D */],
inputsToSave: ["x", "filter"],
gradFunc: (dy, saved, attrs) => {
const [x4D, $filter] = saved;
const { dilations, strides, pad, dataFormat } = attrs;
util["assert"](
tupleValuesAreOne(dilations),
() =>
"Error in gradient of conv2D: dilation rates greater than 1 " +
`are not yet supported in gradients. Got dilations '${dilations}'`
);
return {
x: () => conv2DBackpropInput(x4D.shape, dy, $filter, strides, pad, dataFormat),
filter: () => conv2DBackpropFilter(x4D, dy, $filter.shape, strides, pad, dataFormat),
};
},
};
//# sourceMappingURL=Conv2D_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes a 2D convolution over the input x.
*
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter, rank 4, of shape
* `[filterHeight, filterWidth, inDepth, outDepth]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_guides/python/nn#Convolution](
* https://www.tensorflow.org/api_guides/python/nn#Convolution)
* @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels].
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dimRoundingMode The rounding mode used when computing output
* dimensions if pad is a number. If none is provided, it will not round
* and error if the output is of fractional size.
*/
/** @doc {heading: 'Operations', subheading: 'Convolution'} */
function conv2d_(
x,
filter,
strides,
pad,
dataFormat = "NHWC",
dilations = [1, 1],
dimRoundingMode
) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "conv2d");
const $filter = Object(tensor_util_env["a" /* convertToTensor */])(
filter,
"filter",
"conv2d"
);
let x4D = $x;
let reshapedTo4D = false;
if ($x.rank === 3) {
reshapedTo4D = true;
x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
}
util["assert"](
x4D.rank === 4,
() => `Error in conv2d: input must be rank 4, but got rank ${x4D.rank}.`
);
util["assert"](
$filter.rank === 4,
() => `Error in conv2d: filter must be rank 4, but got rank ` + `${$filter.rank}.`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
        `Error in conv2d: pad must be an integer when using ` +
        `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`
);
}
const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1];
util["assert"](
inDepth === $filter.shape[2],
() =>
`Error in conv2d: depth of input (${inDepth}) must match ` +
`input depth for filter ${$filter.shape[2]}.`
);
util["assert"](
eitherStridesOrDilationsAreOne(strides, dilations),
() =>
"Error in conv2D: Either strides or dilations must be 1. " +
`Got strides ${strides} and dilations '${dilations}'`
);
const forward = (backend, save) => {
const $dataFormat = convertConv2DDataFormat(dataFormat);
const convInfo = computeConv2DInfo(
x4D.shape,
$filter.shape,
strides,
dilations,
pad,
dimRoundingMode,
false,
$dataFormat
);
const res = backend.conv2d(x4D, $filter, convInfo);
save([x4D, $filter]);
return res;
};
const inputs = { x: x4D, filter: $filter };
const attrs = { strides, pad, dataFormat, dilations, dimRoundingMode };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["m" /* Conv2D */],
attrs
);
if (reshapedTo4D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
}
return res;
}
const conv2d = Object(operation["a" /* op */])({ conv2d_ });
//# sourceMappingURL=conv2d.js.map
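/**
 * Usage sketch (not part of the original bundle): assuming these ops are
 * exposed on the public `tf` namespace as in stock TensorFlow.js, a minimal
 * `conv2d` call over a rank-3 input (batch of 1 assumed) looks like:
 *
 * ```js
 * const x = tf.tensor3d([1, 2, 3, 4], [2, 2, 1]); // [height, width, inChannels]
 * const f = tf.tensor4d([2], [1, 1, 1, 1]);       // [fH, fW, inDepth, outDepth]
 * tf.conv2d(x, f, 1, 'same').print();             // values 2, 4, 6, 8 in shape [2, 2, 1]
 * ```
 */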
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2DBackpropInput_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
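// Gradient of the Conv2DBackpropInput kernel itself (used for higher-order
// derivatives): w.r.t. `dy` it is a forward conv2d of the incoming gradient
// with the same filter, and w.r.t. `filter` it is the usual filter backprop;
// both are run with dilations fixed at 1.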
const conv2DBackpropInputGradConfig = {
kernelName: kernel_names["o" /* Conv2DBackpropInput */],
inputsToSave: ["dy", "filter"],
gradFunc: (ddx, saved, attrs) => {
const [dy, filter] = saved;
const { strides, pad, dataFormat, dimRoundingMode } = attrs;
return {
dy: () =>
conv2d(ddx, filter, strides, pad, dataFormat, 1 /* dilations */, dimRoundingMode),
filter: () =>
conv2DBackpropFilter(
ddx,
dy,
filter.shape,
strides,
pad,
dataFormat,
dimRoundingMode
),
};
},
};
//# sourceMappingURL=Conv2DBackpropInput_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_filter.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the derivative of the filter of a 3D convolution.
*
* @param x The input tensor, of rank 5 or rank 4 of shape
* [batch, depth, height, width, inChannels]. If rank 4, batch of 1 is
* assumed.
* @param dy The dy image, of rank 5 or rank 4, of shape
* [batch, depth, height, width, outDepth]. If rank 4, batch of 1 is
* assumed.
* @param filterShape The shape of the filter, length 5,
* [filterDepth, filterHeight, filterWidth, inDepth, outDepth].
* @param strides The strides of the convolution: [strideDepth, strideHeight,
* strideWidth].
* @param pad A string from: 'same', 'valid'. The type of padding algorithm
* used in the forward prop of the op.
*/
function conv3DBackpropFilter_(x, dy, filterShape, strides, pad) {
let x5D = x;
if (x.rank === 4) {
x5D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2], x.shape[3]]);
}
let dy5D = dy;
if (dy5D.rank === 4) {
dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);
}
util["assert"](
x5D.rank === 5,
() => `Error in conv3dDerFilter: input must be rank 5, but got shape ` + `${x5D.shape}.`
);
util["assert"](
dy5D.rank === 5,
() => `Error in conv3dDerFilter: dy must be rank 5, but got shape ` + `${dy5D.shape}.`
);
util["assert"](
filterShape.length === 5,
() =>
`Error in conv3dDerFilter: filterShape must be length 5, but got ` + `${filterShape}.`
);
util["assert"](
x5D.shape[4] === filterShape[3],
() =>
      `Error in conv3dDerFilter: depth of input (${x5D.shape[4]}) must ` +
      `match input depth in filter (${filterShape[3]}).`
);
util["assert"](
dy5D.shape[4] === filterShape[4],
() =>
`Error in conv3dDerFilter: depth of dy (${dy5D.shape[4]}) must ` +
`match output depth for filter (${filterShape[4]}).`
);
const forward = (backend) => {
const dilations = 1;
const convInfo = computeConv3DInfo(x5D.shape, filterShape, strides, dilations, pad);
return backend.conv3dDerFilter(x5D, dy5D, convInfo);
};
const inputs = { x: x5D, y: dy5D };
const attrs = { strides, pad };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["q" /* Conv3DBackpropFilterV2 */],
attrs
);
}
const conv3DBackpropFilter = Object(operation["a" /* op */])({ conv3DBackpropFilter_ });
//# sourceMappingURL=conv3d_backprop_filter.js.map
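/**
 * Usage sketch (illustrative, not in the original bundle): this internal op
 * is what surfaces as the filter gradient of the public `conv3d`. Assuming a
 * stock TensorFlow.js-style `tf` namespace:
 *
 * ```js
 * const x = tf.ones([1, 4, 4, 4, 1]); // [batch, depth, height, width, inC]
 * const f = tf.ones([2, 2, 2, 1, 1]); // [fD, fH, fW, inDepth, outDepth]
 * const grads = tf.grads((x, f) => tf.conv3d(x, f, 1, 'same'));
 * const [dx, df] = grads([x, f]);
 * df.print(); // filter gradient, shape [2, 2, 2, 1, 1]
 * ```
 */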
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the derivative of the input of a 3D convolution.
*
* @param xShape The shape of the input: [batch, depth, height, width,
* in_channels]. If length of 4, batch of 1 is assumed.
* @param dy The derivative of the output, of rank 5 or rank 4 of shape
 * `[batch, outDepth, outHeight, outWidth, outChannels]`.
* If rank 4, batch of 1 is assumed.
* @param filter The filter, rank 5, of shape
* `[filterDepth, filterHeight, filterWidth, inDepth, outDepth]`.
* @param strides The strides of the convolution: `[strideDepth, strideHeight,
* strideWidth]`.
* @param pad The type of padding algorithm used:
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
*/
function conv3DBackpropInput_(xShape, dy, filter, strides, pad) {
util["assert"](
xShape.length === dy.rank,
() => `Length of inShape ` + `(${xShape.length}) and rank of dy (${dy.rank}) must match`
);
let xShape5D = xShape;
let dy5D = dy;
let reshapedTo5D = false;
if (dy.rank === 4) {
reshapedTo5D = true;
dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);
xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]];
}
const inDepth = xShape5D[4];
const outDepth = dy5D.shape[4];
util["assert"](
xShape5D.length === 5,
() =>
`Error in conv3dDerInput: inShape must be length 5, but got length ` +
`${xShape5D.length}.`
);
util["assert"](
dy5D.rank === 5,
() => `Error in conv3dDerInput: dy must be rank 5, but got ` + `rank ${dy5D.rank}`
);
util["assert"](
filter.rank === 5,
() => `Error in conv3dDerInput: filter must be rank 5, but got ` + `rank ${filter.rank}`
);
util["assert"](
inDepth === filter.shape[3],
() =>
`Error in conv3dDerInput: depth of input (${inDepth}) must ` +
      `match input depth for filter (${filter.shape[3]}).`
);
util["assert"](
outDepth === filter.shape[4],
() =>
`Error in conv3dDerInput: depth of output (${outDepth}) must ` +
      `match output depth for filter (${filter.shape[4]}).`
);
const forward = (backend) => {
const dilations = 1;
const convInfo = computeConv3DInfo(xShape5D, filter.shape, strides, dilations, pad);
return backend.conv3dDerInput(dy5D, filter, convInfo);
};
const inputs = { dy: dy5D };
const attrs = { pad };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["r" /* Conv3DBackpropInputV2 */],
attrs
);
if (reshapedTo5D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
}
return res;
}
const conv3DBackpropInput = Object(operation["a" /* op */])({ conv3DBackpropInput_ });
//# sourceMappingURL=conv3d_backprop_input.js.map
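/**
 * Usage sketch (illustrative): the input gradient computed here is the `dx`
 * half of the `tf.grads` example above; it behaves like a transposed 3D
 * convolution of `dy` with the filter. Assuming the op is exposed as
 * `tf.conv3dTranspose`, as in stock tfjs-core:
 *
 * ```js
 * const dy = tf.ones([1, 4, 4, 4, 1]); // derivative of the conv3d output
 * const f = tf.ones([2, 2, 2, 1, 1]);
 * const dx = tf.conv3dTranspose(dy, f, [1, 4, 4, 4, 1], 1, 'same');
 * dx.print(); // gradient w.r.t. the original input
 * ```
 */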
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Conv3D_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const conv3DGradConfig = {
kernelName: kernel_names["p" /* Conv3D */],
inputsToSave: ["x", "filter"],
gradFunc: (dy, saved, attrs) => {
const { dilations, strides, pad } = attrs;
util["assert"](
tupleValuesAreOne(dilations),
() =>
"Error in gradient of conv3D: dilation rates greater than 1 are " +
`not yet supported in gradients. Got dilations '${dilations}'`
);
const [x5D, $filter] = saved;
return {
x: () => conv3DBackpropInput(x5D.shape, dy, $filter, strides, pad),
filter: () => conv3DBackpropFilter(x5D, dy, $filter.shape, strides, pad),
};
},
};
//# sourceMappingURL=Conv3D_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.
*
* The returned `tf.Tensor`'s dimension `i` will correspond to the input
* dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,
* where `n` is the rank of the input `tf.Tensor`. Hence by default, this
* operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.
*
* ```js
* const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
*
* a.transpose().print(); // or tf.transpose(a)
* ```
*
* @param x The tensor to transpose.
* @param perm The permutation of the dimensions of a.
*/
/** @doc {heading: 'Operations', subheading: 'Matrices'} */
function transpose_(x, perm) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "transpose");
if (perm == null) {
perm = $x.shape.map((s, i) => i).reverse();
}
util["assert"](
$x.rank === perm.length,
() =>
`Error in transpose: rank of input ${$x.rank} ` + `must match length of perm ${perm}.`
);
perm.forEach((axis) => {
util["assert"](
axis >= 0 && axis < $x.rank,
() => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` + ` but got ${perm}`
);
});
if ($x.rank <= 1) {
return $x.clone();
}
const attrs = { perm };
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.transpose($x, perm),
{ x: $x },
null /* gradient */,
"Transpose",
attrs
);
}
const transpose = Object(operation["a" /* op */])({ transpose_ });
//# sourceMappingURL=transpose.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the cumulative sum of a `tf.Tensor` along `axis`.
*
* ```js
* const x = tf.tensor([1, 2, 3, 4]);
* x.cumsum().print();
* ```
* ```js
* const x = tf.tensor([[1, 2], [3, 4]]);
* x.cumsum().print();
* ```
*
* @param x The input tensor to be summed.
* @param axis The axis along which to sum. Optional. Defaults to 0.
* @param exclusive Whether to perform exclusive cumulative sum. Optional.
* Defaults to false. If set to true then the sum of each tensor entry
* does not include its own value, but only the values previous to it
* along the specified axis.
* @param reverse Whether to sum in the opposite direction. Optional.
* Defaults to false.
*/
/** @doc {heading: 'Operations', subheading: 'Scan'} */
function cumsum_(x, axis = 0, exclusive = false, reverse = false) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "cumsum");
const forward = (backend, save) => {
const permutation = getAxesPermutation([axis], $x.rank);
let permutedX = $x;
if (permutation != null) {
permutedX = transpose($x, permutation);
}
const permutedAxis = getInnerMostAxes(1, $x.rank)[0];
let value = backend.cumsum(permutedX, permutedAxis, exclusive, reverse);
save([$x]);
if (permutation != null) {
value = transpose(value, permutation);
}
return value;
};
const inputs = { x: $x };
const attrs = { axis, exclusive, reverse };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["s" /* Cumsum */],
attrs
);
}
const cumsum = Object(operation["a" /* op */])({ cumsum_ });
//# sourceMappingURL=cumsum.js.map
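/**
 * Worked example (illustrative): the `exclusive` and `reverse` flags shift or
 * flip the running sum. Assuming a stock `tf` namespace:
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3, 4]);
 * x.cumsum().print();               // [1, 3, 6, 10]
 * x.cumsum(0, true).print();        // exclusive: [0, 1, 3, 6]
 * x.cumsum(0, false, true).print(); // reverse:   [10, 9, 7, 4]
 * ```
 */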
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Cumsum_grad.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
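// The derivative of a cumulative sum is a cumulative sum of dy taken in the
// opposite direction (with the same exclusivity), hence the `!reverse` in the
// call below.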
const cumsumGradConfig = {
kernelName: kernel_names["s" /* Cumsum */],
inputsToSave: ["x"],
gradFunc: (dy, saved, attrs) => {
const [x] = saved;
const { axis, exclusive, reverse } = attrs;
return {
x: () => {
const permutation = getAxesPermutation([axis], x.rank);
let out = cumsum(dy, axis, exclusive, !reverse);
if (permutation != null) {
out = transpose(out, permutation);
}
return out;
},
};
},
};
//# sourceMappingURL=Cumsum_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, convInfo) {
let x4D = x;
if (x.rank === 3) {
x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
}
let dy4D = dy;
if (dy4D.rank === 3) {
dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
}
const forward = (backend) => backend.depthwiseConv2DDerFilter(x4D, dy4D, convInfo);
const inputs = { x: x4D, dy: dy4D };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["v" /* DepthwiseConv2dNativeBackpropFilter */]
);
}
const depthwiseConv2dNativeBackpropFilter = Object(operation["a" /* op */])({
depthwiseConv2dNativeBackpropFilter_,
});
//# sourceMappingURL=depthwise_conv2d_native_backprop_filter.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, convInfo) {
let dy4D = dy;
let reshapedTo4D = false;
if (dy.rank === 3) {
reshapedTo4D = true;
dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
}
const forward = (backend) => backend.depthwiseConv2DDerInput(dy4D, filter, convInfo);
const inputs = { dy: dy4D };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["w" /* DepthwiseConv2dNativeBackpropInput */]
);
if (reshapedTo4D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
}
return res;
}
const depthwiseConv2dNativeBackpropInput = Object(operation["a" /* op */])({
depthwiseConv2dNativeBackpropInput_,
});
//# sourceMappingURL=depthwise_conv2d_native_backprop_input.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/DepthwiseConv2dNative_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const depthwiseConv2dNativeGradConfig = {
kernelName: kernel_names["u" /* DepthwiseConv2dNative */],
inputsToSave: ["x", "filter"],
gradFunc: (dy, saved, attrs) => {
const { dilations, strides, pad, dimRoundingMode } = attrs;
const $dilations = dilations == null ? [1, 1] : dilations;
util["assert"](
tupleValuesAreOne($dilations),
() =>
"Error in gradient of depthwiseConv2dNative: dilation rates " +
`greater than 1 are not yet supported. Got dilations ` +
`'${$dilations}'`
);
const [x, filter] = saved;
util["assert"](
x.rank === 4,
() =>
`Error in gradient of depthwiseConv2dNative: input must be ` +
`rank 4, but got rank ${x.rank}.`
);
util["assert"](
filter.rank === 4,
() =>
`Error in gradient of depthwiseConv2dNative: filter must be ` +
`rank 4, but got rank ${filter.rank}.`
);
util["assert"](
x.shape[3] === filter.shape[2],
() =>
`Error in gradient of depthwiseConv2d: number of input ` +
`channels (${x.shape[3]}) must match the inChannels dimension ` +
        `in filter (${filter.shape[2]}).`
);
util["assert"](
eitherStridesOrDilationsAreOne(strides, $dilations),
() =>
"Error in gradient of depthwiseConv2d: Either strides or " +
`dilations must be 1. Got strides ${strides} and dilations ` +
`'${$dilations}'.`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
          `Error in depthwiseConv2d: pad must be an integer when using ` +
          `dimRoundingMode ${dimRoundingMode}, but got pad ${pad}.`
);
}
const convInfo = computeConv2DInfo(
x.shape,
filter.shape,
strides,
$dilations,
pad,
dimRoundingMode,
true /* depthwise */
);
return {
x: () => depthwiseConv2dNativeBackpropInput(x.shape, dy, filter, convInfo),
filter: () => depthwiseConv2dNativeBackpropFilter(x, dy, filter.shape, convInfo),
};
},
};
//# sourceMappingURL=DepthwiseConv2dNative_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Div_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
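// Quotient rule: for y = a / b, dy/da = 1 / b and dy/db = -a / b^2. derA
// therefore divides the upstream gradient by b, and derB scales it by
// -a / b^2; the sums over `reduceAxes` undo broadcasting so each gradient
// matches its input's original shape.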
const divGradConfig = {
kernelName: kernel_names["y" /* Div */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
const res = div(dy, b.toFloat());
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
return sum(res, reduceAxes).reshape(a.shape);
}
return res;
};
const derB = () => {
let res = mul(dy, a.toFloat());
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
res = reshape(sum(res, reduceAxes), b.shape);
}
const tmp = square(b);
return neg(div(res, tmp.toFloat()));
};
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Div_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Elu_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
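// ELU gradient, computed from the saved output y (alpha is fixed at 1 here):
// for x > 0 the slope is 1; for x <= 0, y = exp(x) - 1, so the slope exp(x)
// equals y + 1. The backend's eluDer kernel evaluates exactly this.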
const eluGradConfig = {
kernelName: kernel_names["z" /* Elu */],
outputsToSave: [true],
gradFunc: (dy, saved) => {
const [y] = saved;
const backPropKernelFunc = (backend) => {
return backend.eluDer(dy, y);
};
const inputs = { dy, y };
return {
x: () =>
engine["a" /* ENGINE */].runKernelFunc(
backPropKernelFunc,
inputs,
null /* grad */,
kernel_names["A" /* EluGrad */]
),
};
},
};
//# sourceMappingURL=Elu_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/FloorDiv_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
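// Note: floorDiv reuses the smooth quotient-rule gradients of Div above; the
// floor is ignored for differentiation, since its exact derivative is zero
// almost everywhere.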
const floorDivGradConfig = {
kernelName: kernel_names["D" /* FloorDiv */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
const res = dy.div(b.toFloat());
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
return res.sum(reduceAxes).reshape(a.shape);
}
return res;
};
const derB = () => {
let res = dy.mul(a.toFloat());
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
res = res.sum(reduceAxes).reshape(b.shape);
}
const tmp = b.square();
return res.div(tmp.toFloat()).neg();
};
return { a: derA, b: derB };
},
};
//# sourceMappingURL=FloorDiv_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sub.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([10, 20, 30, 40]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.sub(b).print(); // or tf.sub(a, b)
* ```
*
* ```js
* // Broadcast subtract a with b.
* const a = tf.tensor1d([10, 20, 30, 40]);
* const b = tf.scalar(5);
*
* a.sub(b).print(); // or tf.sub(a, b)
* ```
* @param a The first `tf.Tensor` to subtract from.
* @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as
* `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
function sub_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "sub");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "sub");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
const forward = (backend, save) => {
const res = backend.subtract($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["qb" /* Sub */]
);
}
const sub = Object(operation["a" /* op */])({ sub_ });
//# sourceMappingURL=sub.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tile.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Constructs a tensor by repeating the input the number of times given by `reps`.
*
* This operation creates a new tensor by replicating `input` `reps`
* times. The output tensor's i'th dimension has `input.shape[i] *
* reps[i]` elements, and the values of `input` are replicated
* `reps[i]` times along the i'th dimension. For example, tiling
* `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.
*
* ```js
* const a = tf.tensor1d([1, 2]);
*
 * a.tile([2]).print(); // or tf.tile(a, [2])
* ```
*
* ```js
* const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
 * a.tile([1, 2]).print(); // or tf.tile(a, [1, 2])
* ```
* @param x The tensor to tile.
* @param reps Determines the number of replications per dimension.
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
function tile_(x, reps) {
  const parseAs = null; // null skips the default 'numeric' dtype check, so any dtype (e.g. string) can be tiled
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "tile", parseAs);
util["assert"](
$x.rank === reps.length,
() =>
      `Error in tile: rank of input ${$x.rank} ` + `must match length of reps ${reps}.`
);
const forward = (backend, save) => {
const res = backend.tile($x, reps);
save([$x]);
return res;
};
const inputsToSave = [$x];
const inputs = { x: $x };
const attrs = { reps };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["rb" /* Tile */],
attrs,
inputsToSave
);
}
const tile = Object(operation["a" /* op */])({ tile_ });
//# sourceMappingURL=tile.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/FusedBatchNorm_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
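// Batch-norm backprop, written out explicitly. With
//   y = scale * (x - mean) / sqrt(variance + eps) + offset,
// the derivatives implemented below are:
//   dx      = dy * scale / sqrt(variance + eps)
//   dmean   = -sum(dy * scale) / sqrt(variance + eps)
//   dvar    = sum(dy * scale * (x - mean)) * -0.5 * (variance + eps)^(-3/2)
//   dscale  = sum(dy * (x - mean) / sqrt(variance + eps))
//   doffset = sum(dy)
// where the sums run over the broadcast axes when mean is rank 1. Note that
// mean and variance are treated as independent inputs, not re-derived from x.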
const fusedBatchNormGradConfig = {
kernelName: kernel_names["F" /* FusedBatchNorm */],
inputsToSave: ["x", "mean", "variance", "scale"],
gradFunc: (dy, saved, attrs) => {
const { varianceEpsilon } = attrs;
const [x, mean, variance, scale] = saved;
const scaleValue = scale == null ? Object(tensor_ops["e" /* scalar */])(1) : scale;
const reductionAxes = getReductionAxes(mean.shape, x.shape);
const tileShape = [];
if (mean.rank === 1) {
for (let i = 0; i < x.shape.length - 1; ++i) {
tileShape.push(x.shape[i]);
}
tileShape.push(1);
}
const xMinusMean = sub(x, mean);
const dyTimesScaleValue = mul(dy, scaleValue);
const oneOverSqrtVariance = rsqrt(
add(variance, Object(tensor_ops["e" /* scalar */])(varianceEpsilon))
);
const minusHalfRCube = mul(
mul(mul(oneOverSqrtVariance, oneOverSqrtVariance), oneOverSqrtVariance),
Object(tensor_ops["e" /* scalar */])(-0.5)
);
const derX = () => {
if (mean.rank === 1) {
return reshape(
mul(
mul(dy, tile(oneOverSqrtVariance.as4D(1, 1, 1, mean.shape[0]), tileShape)),
scaleValue
),
x.shape
);
} else {
return reshape(mul(mul(dy, oneOverSqrtVariance), scaleValue), x.shape);
}
};
const derMean = () => {
let meanDer = mul(
mul(oneOverSqrtVariance, Object(tensor_ops["e" /* scalar */])(-1)),
dyTimesScaleValue
);
if (mean.rank === 1) {
meanDer = sum(meanDer, reductionAxes);
}
return reshape(meanDer, mean.shape);
};
const derVariance = () => {
let varianceDer = mul(mul(minusHalfRCube, xMinusMean), dyTimesScaleValue);
if (mean.rank === 1) {
varianceDer = sum(varianceDer, reductionAxes);
}
return reshape(varianceDer, mean.shape);
};
const derScale = () => {
const xMinusMean2TimesRsqrt = mul(xMinusMean, oneOverSqrtVariance);
let scaleDer = mul(dy, xMinusMean2TimesRsqrt);
if (mean.rank === 1) {
scaleDer = sum(scaleDer, reductionAxes);
}
return reshape(scaleDer, mean.shape);
};
const derOffset = () => {
let offsetDer = dy;
if (mean.rank === 1) {
offsetDer = sum(offsetDer, reductionAxes);
}
return reshape(offsetDer, mean.shape);
};
return {
x: derX,
mean: derMean,
variance: derVariance,
scale: derScale,
offset: derOffset,
};
},
};
//# sourceMappingURL=FusedBatchNorm_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/GreaterEqual_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const greaterEqualGradConfig = {
kernelName: kernel_names["I" /* GreaterEqual */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
return {
a: () => Object(tensor_ops["o" /* zerosLike */])(a),
b: () => Object(tensor_ops["o" /* zerosLike */])(b),
};
},
};
//# sourceMappingURL=GreaterEqual_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Identity_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const identityGradConfig = {
kernelName: kernel_names["J" /* Identity */],
gradFunc: (dy) => {
return { x: () => dy.toFloat() };
},
};
//# sourceMappingURL=Identity_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization_backprop.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
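/**
 * Backprop for local response normalization: delegates to the backend's
 * LRNGrad kernel, forwarding the saved input x, output y, and the forward
 * op's (depthRadius, bias, alpha, beta) attributes.
 */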
function localResponseNormalizationBackprop_(
x,
y,
dy,
depthRadius = 5,
bias = 1,
alpha = 1,
beta = 0.5
) {
const forward = (backend) => backend.LRNGrad(dy, x, y, depthRadius, bias, alpha, beta);
const inputs = { x, y, dy };
const attrs = { depthRadius, bias, alpha, beta };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["M" /* LRNBackprop */],
attrs
);
}
const localResponseNormalizationBackprop = Object(operation["a" /* op */])({
localResponseNormalizationBackprop_,
});
//# sourceMappingURL=local_response_normalization_backprop.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/LRN_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const lrnGradConfig = {
kernelName: kernel_names["L" /* LRN */],
inputsToSave: ["x"],
outputsToSave: [true],
gradFunc: (dy, saved, attrs) => {
const [x, y] = saved;
const { depthRadius, bias, alpha, beta } = attrs;
return {
x: () => localResponseNormalizationBackprop(x, y, dy, depthRadius, bias, alpha, beta),
};
},
};
//# sourceMappingURL=LRN_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Max_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
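// Gradient of a max reduction: upstream dy is routed only to the positions
// where the input equals the reduced maximum (a masked pass-through computed
// by gradForMinAndMax); reductions over non-innermost axes go through an axis
// permutation first.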
const maxGradConfig = {
kernelName: kernel_names["P" /* Max */],
inputsToSave: ["x"],
outputsToSave: [true],
gradFunc: (dy, saved, attrs) => {
const maxAttrs = attrs;
const { reductionIndices } = maxAttrs;
const [x, y] = saved;
const origAxes = util["parseAxisParam"](reductionIndices, x.shape);
const permutedAxes = getAxesPermutation(origAxes, x.rank);
const maxGrad = gradForMinAndMax(dy, y, x, origAxes, permutedAxes);
return {
x: () => {
let out = maxGrad["x"]();
if (permutedAxes != null) {
out = transpose(out);
}
return out;
},
};
},
};
//# sourceMappingURL=Max_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the truth value of (a >= b) element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 2, 3]);
* const b = tf.tensor1d([2, 2, 2]);
*
* a.greaterEqual(b).print();
* ```
*
* @param a The first input tensor.
* @param b The second input tensor. Must have the same dtype as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function greaterEqual_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "greaterEqual");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "greaterEqual");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
assertAndGetBroadcastShape($a.shape, $b.shape);
const forward = (backend, save) => {
const res = backend.greaterEqual($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["I" /* GreaterEqual */]
);
}
const greaterEqual = Object(operation["a" /* op */])({ greaterEqual_ });
//# sourceMappingURL=greater_equal.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/less.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the truth value of (a < b) element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 2, 3]);
* const b = tf.tensor1d([2, 2, 2]);
*
* a.less(b).print();
* ```
* @param a The first input tensor.
* @param b The second input tensor. Must have the same dtype as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function less_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "less");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "less");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
assertAndGetBroadcastShape($a.shape, $b.shape);
const forward = (backend) => backend.less($a, $b);
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["N" /* Less */]
);
}
const less = Object(operation["a" /* op */])({ less_ });
//# sourceMappingURL=less.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Maximum_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
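// Elementwise maximum gradient: dy is routed to `a` where a >= b and to `b`
// where a < b, so ties send the full gradient to `a`. The boolean comparison
// results are cast to float32 to serve as 0/1 masks.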
const maximumGradConfig = {
kernelName: kernel_names["V" /* Maximum */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const derA = () => mul(dy, cast(greaterEqual(a, b), "float32"));
const derB = () => mul(dy, cast(less(a, b), "float32"));
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Maximum_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d_backprop.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the backprop of a 3d max pool.
*
 * @param dy The dy error, of rank 5 or rank 4, of shape
 * [batchSize, depth, height, width, channels]. If rank 4, a batch of 1 is
 * assumed.
* @param input The original input image, of rank 5 or rank 4 of shape
* [batchSize, depth, height, width, channels].
* @param output The original output image, of rank 5 of shape
* [batchSize, outDepth, outHeight, outWidth, channels].
* @param filterSize The filter size:
* `[filterDepth, filterHeight, filterWidth]`.
 * If `filterSize` is a single number,
* then `filterDepth == filterHeight == filterWidth`.
* @param strides The strides of the pooling:
* `[strideDepth, strideHeight, strideWidth]`. If
 * `strides` is a single number, then `strideDepth == strideHeight == strideWidth`.
* @param dilations Deprecated, this field will be gone in v3.0.0.
* The dilation rates: `[dilationDepth, dilationHeight, dilationWidth]`
* in which we sample input values across the depth, height and width
* dimensions in dilated pooling.
* Defaults to `[1, 1, 1]`. If `dilations` is a single number,
* then `dilationDepth == dilationHeight == dilationWidth`.
* If it is greater than 1, then all values of `strides` must be 1.
* @param pad A string from: 'same', 'valid'. The type of padding algorithm
* used in the forward prop of the op.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
* rounding mode used when computing output dimensions if pad is a
 * number. If none is provided, it will not round and will error if the output
* is of fractional size.
*/
function maxPool3dBackprop_(
dy,
input,
output,
filterSize,
strides,
dilations = [1, 1, 1],
pad,
dimRoundingMode
) {
const $dy = Object(tensor_util_env["a" /* convertToTensor */])(
dy,
"dy",
"maxPool3dBackprop"
);
const $input = Object(tensor_util_env["a" /* convertToTensor */])(
input,
"input",
"maxPool3dBackprop"
);
const $output = Object(tensor_util_env["a" /* convertToTensor */])(
output,
"output",
"maxPool3dBackprop"
);
let dy5D = $dy;
let input5D = $input;
let output5D = $output;
let reshapedTo5D = false;
if ($input.rank === 4) {
reshapedTo5D = true;
dy5D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]);
input5D = reshape($input, [
1,
$input.shape[0],
$input.shape[1],
$input.shape[2],
$input.shape[3],
]);
output5D = reshape($output, [
1,
$output.shape[0],
$output.shape[1],
$output.shape[2],
$output.shape[3],
]);
}
util["assert"](
dy5D.rank === 5,
() => `Error in maxPool3dBackprop: dy must be rank 5 but got rank ` + `${dy5D.rank}.`
);
util["assert"](
input5D.rank === 5,
() =>
`Error in maxPool3dBackprop: input must be rank 5 but got rank ` + `${input5D.rank}.`
);
util["assert"](
output5D.rank === 5,
() =>
`Error in maxPool3dBackprop: output must be rank 5 but got rank ` + `${output5D.rank}.`
);
util["assert"](
eitherStridesOrDilationsAreOne(strides, dilations),
() =>
"Error in maxPool3dBackprop: Either strides or dilations " +
`must be 1. Got strides ${strides} and dilations '${dilations}'`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
`Error in maxPool3dBackprop: pad must be an integer when ` +
        `using dimRoundingMode ${dimRoundingMode}, but got pad ${pad}.`
);
}
const forward = (backend) => {
const convInfo = computePool3DInfo(
input5D.shape,
filterSize,
strides,
dilations,
pad,
dimRoundingMode
);
return backend.maxPool3dBackprop(dy5D, input5D, output5D, convInfo);
};
const inputs = { dy: dy5D, input: input5D, output: output5D };
const attrs = { filterSize, strides, dilations, pad, dimRoundingMode };
const res = engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["S" /* MaxPool3DBackprop */],
attrs
);
if (reshapedTo5D) {
return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
}
return res;
}
const maxPool3dBackprop = Object(operation["a" /* op */])({ maxPool3dBackprop_ });
//# sourceMappingURL=max_pool_3d_backprop.js.map
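/**
 * Usage sketch (illustrative): this backprop is what autodiff invokes for a
 * stock-TensorFlow.js `tf.maxPool3d`. Only the input positions that produced
 * a pooling maximum receive gradient:
 *
 * ```js
 * const x = tf.randomNormal([1, 4, 4, 4, 1]);
 * const dx = tf.grad((x) => tf.maxPool3d(x, 2, 2, 'valid'))(x);
 * dx.print(); // sparse: nonzero only at the argmax of each pooling window
 * ```
 */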
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool3D_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const maxPool3DGradConfig = {
kernelName: kernel_names["R" /* MaxPool3D */],
inputsToSave: ["x"],
outputsToSave: [true],
gradFunc: (dy, saved, attrs) => {
const [x, y] = saved;
const { filterSize, strides, dilations, pad, dimRoundingMode } = attrs;
const $dilations = dilations == null ? [1, 1, 1] : dilations;
return {
x: () =>
maxPool3dBackprop(dy, x, y, filterSize, strides, $dilations, pad, dimRoundingMode),
};
},
};
//# sourceMappingURL=MaxPool3D_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_backprop.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the backprop of a 2D max pool.
*
 * @param dy The dy error, of rank 4, of shape
 * [batchSize, height, width, channels].
* @param input The original input image, of rank 4, of shape
* [batchSize, height, width, channels].
* @param output The original output image, of rank 4, of shape
* [batchSize, outHeight, outWidth, channels].
* @param filterSize The filter size: `[filterHeight, filterWidth]`. If
* `filterSize` is a single number, then `filterHeight == filterWidth`.
* @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
* `strides` is a single number, then `strideHeight == strideWidth`.
* @param pad A string from: 'same', 'valid'. The type of padding algorithm
* used in the forward prop of the op.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. The
* rounding mode used when computing output dimensions if pad is a
 * number. If none is provided, it will not round and will error if the output
* is of fractional size.
*/
function maxPoolBackprop_(dy, input, output, filterSize, strides, pad, dimRoundingMode) {
const $dy = Object(tensor_util_env["a" /* convertToTensor */])(dy, "dy", "maxPoolBackprop");
const $input = Object(tensor_util_env["a" /* convertToTensor */])(
input,
"input",
"maxPoolBackprop"
);
const $output = Object(tensor_util_env["a" /* convertToTensor */])(
output,
"output",
"maxPoolBackprop"
);
util["assert"](
$input.rank === $dy.rank,
() => `Rank of input (${$input.rank}) does not match rank of dy ` + `(${$dy.rank})`
);
util["assert"](
$dy.rank === 4,
() => `Error in maxPoolBackprop: dy must be rank 4 but got rank ` + `${$dy.rank}.`
);
util["assert"](
$input.rank === 4,
() => `Error in maxPoolBackprop: input must be rank 4 but got rank ` + `${$input.rank}.`
);
if (dimRoundingMode != null) {
util["assert"](
util["isInt"](pad),
() =>
        `Error in maxPoolBackprop: pad must be an integer when using ` +
        `dimRoundingMode ${dimRoundingMode}, but got pad ${pad}.`
);
}
const forward = (backend) => {
const convInfo = computePool2DInfo(
$input.shape,
filterSize,
strides,
1 /* dilations */,
pad,
dimRoundingMode
);
return backend.maxPoolBackprop($dy, $input, $output, convInfo);
};
const inputs = { dy: $dy, input: $input, output: $output };
const attrs = { filterSize, strides, pad, dimRoundingMode };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null,
kernel_names["T" /* MaxPoolBackprop */],
attrs
);
}
const maxPoolBackprop = Object(operation["a" /* op */])({ maxPoolBackprop_ });
//# sourceMappingURL=max_pool_backprop.js.map
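/**
 * Usage sketch (illustrative): the 2D analogue of the example above; taking
 * `tf.grad` over a stock `tf.maxPool` routes dy back through this op.
 *
 * ```js
 * const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
 * const dx = tf.grad((x) => tf.maxPool(x, 2, 2, 'valid'))(x);
 * dx.print(); // zeros except a 1 at the position of the max entry (4)
 * ```
 */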
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const maxPoolGradConfig = {
kernelName: kernel_names["Q" /* MaxPool */],
inputsToSave: ["x"],
outputsToSave: [true],
gradFunc: (dy, saved, attrs) => {
const [x, y] = saved;
const { filterSize, strides, pad } = attrs;
return {
x: () => maxPoolBackprop(dy, x, y, filterSize, strides, pad),
};
},
};
//# sourceMappingURL=MaxPool_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/greater.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the truth value of (a > b) element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 2, 3]);
* const b = tf.tensor1d([2, 2, 2]);
*
* a.greater(b).print();
* ```
*
* @param a The first input tensor.
* @param b The second input tensor. Must have the same dtype as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function greater_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "greater");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "greater");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
assertAndGetBroadcastShape($a.shape, $b.shape);
const forward = (backend) => backend.greater($a, $b);
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["H" /* Greater */]
);
}
const greater = Object(operation["a" /* op */])({ greater_ });
//# sourceMappingURL=greater.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the truth value of (a <= b) element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 2, 3]);
* const b = tf.tensor1d([2, 2, 2]);
*
* a.lessEqual(b).print();
* ```
*
* @param a The first input tensor.
* @param b The second input tensor. Must have the same dtype as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function lessEqual_(a, b) {
let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "lessEqual");
let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "lessEqual");
[$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
assertAndGetBroadcastShape($a.shape, $b.shape);
const forward = (backend, save) => {
const res = backend.lessEqual($a, $b);
save([$a, $b]);
return res;
};
const inputs = { a: $a, b: $b };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["O" /* LessEqual */]
);
}
const lessEqual = Object(operation["a" /* op */])({ lessEqual_ });
//# sourceMappingURL=less_equal.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Minimum_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const minimumGradConfig = {
kernelName: kernel_names["W" /* Minimum */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const derA = () => mul(dy, cast(lessEqual(a, b), "float32"));
const derB = () => mul(dy, cast(greater(a, b), "float32"));
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Minimum_grad.js.map
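/*
 * A scalar sketch of the Minimum gradient rule above (illustrative only, not
 * part of tfjs): dy flows to whichever input supplied the minimum, which is
 * exactly what the lessEqual/greater masks compute elementwise.
 *
 * ```js
 * const dMinDa = (a, b) => (a <= b ? 1 : 0); // mirrors cast(lessEqual(a, b))
 * const dMinDb = (a, b) => (a > b ? 1 : 0);  // mirrors cast(greater(a, b))
 * dMinDa(2, 3); // 1 -> min moves with `a`
 * dMinDb(2, 3); // 0
 * ```
 */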
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Mod_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const modGradConfig = {
kernelName: kernel_names["X" /* Mod */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
return reshape(sum(dy, reduceAxes), a.shape);
}
return dy;
};
const derB = () => {
const res = mul(dy, neg(floor(div(a, b))));
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
return reshape(sum(res, reduceAxes), b.shape);
}
return res;
};
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Mod_grad.js.map
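/*
 * For reference, the Mod gradient above follows from a mod b = a - b *
 * floor(a / b), so d/da = 1 and d/db = -floor(a / b). A scalar sketch
 * (illustrative only, not part of tfjs):
 *
 * ```js
 * const dModDa = () => 1;
 * const dModDb = (a, b) => -Math.floor(a / b); // matches neg(floor(div(a, b)))
 * dModDb(7, 3); // -2
 * ```
 */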
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Multiply_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const multiplyGradConfig = {
kernelName: kernel_names["Y" /* Multiply */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
const res = mul(dy, cast(b, "float32"));
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
return reshape(sum(res, reduceAxes), a.shape);
}
return res;
};
const derB = () => {
const res = mul(dy, cast(a, "float32"));
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
return reshape(sum(res, reduceAxes), b.shape);
}
return res;
};
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Multiply_grad.js.map
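/*
 * The Multiply gradient above is the product rule plus a broadcast fix-up:
 * each partial (dy times the other input) is summed over the axes that
 * broadcasting expanded and reshaped back to that input's shape. A
 * shape-only sketch for a [2, 3] * [3] multiply (illustrative only):
 *
 * ```js
 * // `b` of shape [3] broadcasts over axis 0 of the [2, 3] output, so the
 * // gradient w.r.t. `b` sums mul(dy, a) over axis 0 to recover shape [3];
 * // here getReductionAxes([3], [2, 3]) yields [0].
 * ```
 */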
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/OneHot_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const oneHotGradConfig = {
kernelName: kernel_names["cb" /* OneHot */],
inputsToSave: ["indices"],
gradFunc: (dy, saved) => {
const indices = saved[0];
return { indices: () => Object(tensor_ops["n" /* zeros */])(indices.shape, "float32") };
},
};
//# sourceMappingURL=OneHot_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/PadV2_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const padV2GradConfig = {
kernelName: kernel_names["db" /* PadV2 */],
inputsToSave: ["x"],
gradFunc: (dy, saved, attrs) => {
// Pad introduces values around the original tensor, so the gradient
// slices the original shape out of the gradient.
const x = saved[0];
const { paddings } = attrs;
const begin = paddings.map((p) => p[0]);
return { x: () => dy.slice(begin, x.shape) };
},
};
//# sourceMappingURL=PadV2_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** An implementation of the Where kernel shared between the CPU and WebGL backends */
function whereImpl(condShape, condVals) {
const indices = [];
for (let i = 0; i < condVals.length; i++) {
if (condVals[i]) {
indices.push(i);
}
}
const inBuffer = array_ops_buffer(condShape, "int32");
const out = array_ops_buffer([indices.length, condShape.length], "int32");
for (let i = 0; i < indices.length; i++) {
const loc = inBuffer.indexToLoc(indices[i]);
const offset = i * condShape.length;
out.values.set(loc, offset);
}
return out.toTensor();
}
//# sourceMappingURL=where_impl.js.map
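/*
 * A small worked example of whereImpl above (illustrative only): with
 * condShape [2, 2] and condVals [0, 1, 0, 1] in row-major order, the truthy
 * flat indices are 1 and 3, whose coordinates are [0, 1] and [1, 1].
 *
 * ```js
 * // whereImpl([2, 2], [0, 1, 0, 1]) -> int32 tensor [[0, 1], [1, 1]]
 * ```
 */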
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/logical_ops.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the truth value of `NOT x` element-wise.
*
* ```js
* const a = tf.tensor1d([false, true], 'bool');
*
* a.logicalNot().print();
* ```
*
* @param x The input tensor. Must be of dtype 'bool'.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function logicalNot_(x) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "logicalNot", "bool");
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.logicalNot($x), { $x });
}
/**
* Returns the truth value of `a AND b` element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([false, false, true, true], 'bool');
* const b = tf.tensor1d([false, true, false, true], 'bool');
*
* a.logicalAnd(b).print();
* ```
*
* @param a The first input tensor. Must be of dtype bool.
* @param b The second input tensor. Must be of dtype bool.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function logicalAnd_(a, b) {
const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "logicalAnd", "bool");
const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "logicalAnd", "bool");
assertAndGetBroadcastShape($a.shape, $b.shape);
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.logicalAnd($a, $b),
{ a: $a, b: $b },
null /* grad */,
"LogicalAnd"
);
}
/**
* Returns the truth value of `a OR b` element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([false, false, true, true], 'bool');
* const b = tf.tensor1d([false, true, false, true], 'bool');
*
* a.logicalOr(b).print();
* ```
* @param a The first input tensor. Must be of dtype bool.
* @param b The second input tensor. Must be of dtype bool.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function logicalOr_(a, b) {
const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "logicalOr", "bool");
const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "logicalOr", "bool");
assertAndGetBroadcastShape($a.shape, $b.shape);
return engine["a" /* ENGINE */].runKernelFunc((backend) => backend.logicalOr($a, $b), {
$a,
$b,
});
}
/**
* Returns the truth value of `a XOR b` element-wise. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([false, false, true, true], 'bool');
* const b = tf.tensor1d([false, true, false, true], 'bool');
*
* a.logicalXor(b).print();
* ```
*
* @param a The first input tensor. Must be of dtype bool.
* @param b The second input tensor. Must be of dtype bool.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function logicalXor_(a, b) {
const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "logicalXor", "bool");
const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "logicalXor", "bool");
assertAndGetBroadcastShape($a.shape, $b.shape);
// x ^ y = (x | y) & ~(x & y)
return logicalOr(a, b).logicalAnd(logicalAnd(a, b).logicalNot());
}
/**
* Returns the elements, either `a` or `b` depending on the `condition`.
*
* If the condition is true, select from `a`, otherwise select from `b`.
*
* ```js
* const cond = tf.tensor1d([false, false, true], 'bool');
* const a = tf.tensor1d([1 , 2, 3]);
* const b = tf.tensor1d([-1, -2, -3]);
*
* a.where(cond, b).print();
* ```
*
* @param condition The input condition. Must be of dtype bool.
* @param a If `condition` is rank 1, `a` may have a higher rank but
* its first dimension must match the size of `condition`.
* @param b A tensor with the same shape and type as `a`.
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
function where_(condition, a, b) {
const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, "a", "where");
const $b = Object(tensor_util_env["a" /* convertToTensor */])(b, "b", "where");
const $condition = Object(tensor_util_env["a" /* convertToTensor */])(
condition,
"condition",
"where",
"bool"
);
Object(util["assertShapesMatch"])($a.shape, $b.shape, "Error in where: ");
if ($condition.rank === 1) {
// If condition rank is 1, then the first dimension must match the size of
// condition.
Object(util["assert"])(
$condition.shape[0] === $a.shape[0],
() => "The first dimension of `a` must match the size of `condition`."
);
} else {
// Otherwise `condition` must have the same shape as `a` and `b` (whose
// shapes already match, per the assertion above).
Object(util["assertShapesMatch"])($condition.shape, $b.shape, "Error in where: ");
}
// TODO(julianoks): Return null for condition gradient
// when backprop supports it.
const grad = (dy, saved) => {
const [$condition] = saved;
return {
condition: () => Object(tensor_ops["o" /* zerosLike */])($condition).toFloat(),
t: () => dy.mul($condition.cast(dy.dtype)),
e: () => dy.mul($condition.logicalNot().cast(dy.dtype)),
};
};
const inputs = { condition: $condition, t: $a, e: $b };
return engine["a" /* ENGINE */].runKernelFunc(
(backend, save) => {
const res = backend.select($condition, $a, $b);
save([$condition]);
return res;
},
inputs,
grad,
kernel_names["kb" /* SelectV2 */]
);
}
/**
* Returns the coordinates of true elements of condition.
*
* The coordinates are returned in a 2-D tensor where the first dimension (rows)
* represents the number of true elements, and the second dimension (columns)
* represents the coordinates of the true elements. Keep in mind, the shape of
 * the output tensor can vary depending on how many true values there are in
 * the input. Indices are output in row-major order. The resulting tensor has
* shape `[numTrueElems, condition.rank]`.
*
* This is analogous to calling the python `tf.where(cond)` without an x or y.
*
* ```js
* const cond = tf.tensor1d([false, false, true], 'bool');
* const result = await tf.whereAsync(cond);
* result.print();
* ```
*/
/** @doc {heading: 'Operations', subheading: 'Logical'} */
async function whereAsync_(condition) {
const $condition = Object(tensor_util_env["a" /* convertToTensor */])(
condition,
"condition",
"whereAsync",
"bool"
);
const vals = await $condition.data();
const res = whereImpl($condition.shape, vals);
if (condition !== $condition) {
$condition.dispose();
}
return res;
}
const logicalAnd = Object(operation["a" /* op */])({ logicalAnd_ });
const logicalNot = Object(operation["a" /* op */])({ logicalNot_ });
const logicalOr = Object(operation["a" /* op */])({ logicalOr_ });
const logicalXor = Object(operation["a" /* op */])({ logicalXor_ });
const where = Object(operation["a" /* op */])({ where_ });
const whereAsync = whereAsync_;
//# sourceMappingURL=logical_ops.js.map
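/*
 * logicalXor_ above relies on the boolean identity x ^ y = (x | y) & ~(x & y)
 * rather than a dedicated kernel. A plain-JavaScript check of that identity
 * (illustrative only):
 *
 * ```js
 * const xor = (x, y) => (x || y) && !(x && y);
 * [[false, false], [false, true], [true, false], [true, true]]
 *   .map(([x, y]) => xor(x, y)); // [false, true, true, false]
 * ```
 */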
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pow.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the power of one `tf.Tensor` to another. Supports broadcasting.
*
* Given a `tf.Tensor` x and a `tf.Tensor` y, this operation computes x^y for
* corresponding elements in x and y. The result's dtype will be the upcasted
* type of the `base` and `exp` dtypes.
*
* ```js
* const a = tf.tensor([[2, 3], [4, 5]])
* const b = tf.tensor([[1, 2], [3, 0]]).toInt();
*
* a.pow(b).print(); // or tf.pow(a, b)
* ```
*
* ```js
* const a = tf.tensor([[1, 2], [3, 4]])
* const b = tf.tensor(2).toInt();
*
* a.pow(b).print(); // or tf.pow(a, b)
* ```
* We also expose `powStrict` which has the same signature as this op and
* asserts that `base` and `exp` are the same shape (does not broadcast).
*
* @param base The base `tf.Tensor` to pow element-wise.
* @param exp The exponent `tf.Tensor` to pow element-wise.
*/
/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */
function pow_(base, exp) {
let $base = Object(tensor_util_env["a" /* convertToTensor */])(base, "base", "pow");
let $exp = Object(tensor_util_env["a" /* convertToTensor */])(exp, "exp", "pow");
[$base, $exp] = Object(tensor_util["makeTypesMatch"])($base, $exp);
const inputs = { a: $base, b: $exp };
const forward = (backend, save) => {
const y = backend.pow($base, $exp);
save([$base, $exp, y]);
return y;
};
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["fb" /* Pow */]
);
}
const pow = Object(operation["a" /* op */])({ pow_ });
//# sourceMappingURL=pow.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Pow_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const powGradConfig = {
kernelName: kernel_names["fb" /* Pow */],
inputsToSave: ["a", "b"],
outputsToSave: [true],
gradFunc: (dy, saved) => {
const [a, b, y] = saved;
const base = a;
const exp = b;
const outShape = assertAndGetBroadcastShape(base.shape, exp.shape);
const derBase = () => {
const expFloat = cast(exp, "float32");
let res = mul(
dy,
mul(expFloat, pow(base, sub(expFloat, Object(tensor_ops["e" /* scalar */])(1))))
);
const reduceAxes = getReductionAxes(base.shape, outShape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(res, base.shape);
};
const derExp = () => {
const condition = greater(base, 0);
const logBase = where(
condition,
log(base),
Object(tensor_ops["o" /* zerosLike */])(base)
);
let res = mul(dy, mul(y, logBase));
const reduceAxes = getReductionAxes(exp.shape, outShape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(res, exp.shape);
};
return { a: derBase, b: derExp };
},
};
//# sourceMappingURL=Pow_grad.js.map
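/*
 * The Pow gradient above encodes the standard calculus rules
 * d(base^exp)/dbase = exp * base^(exp - 1) and
 * d(base^exp)/dexp = base^exp * ln(base), with ln(base) replaced by 0 where
 * base <= 0 (the where/log guard). A scalar sketch (illustrative only):
 *
 * ```js
 * const dPowDBase = (base, exp) => exp * Math.pow(base, exp - 1);
 * const dPowDExp = (base, exp) =>
 *   Math.pow(base, exp) * (base > 0 ? Math.log(base) : 0);
 * dPowDBase(2, 3); // 12
 * ```
 */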
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Prelu_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const preluGradConfig = {
kernelName: kernel_names["gb" /* Prelu */],
inputsToSave: ["x", "alpha"],
gradFunc: (dy, saved) => {
const [x, alpha] = saved;
const mask = greater(x, 0);
return {
x: () => where(mask, dy, mul(dy, alpha)),
alpha: () => {
let res = where(mask, Object(tensor_ops["o" /* zerosLike */])(dy), mul(dy, x));
const reduceAxes = getReductionAxes(alpha.shape, dy.shape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(res, alpha.shape);
},
};
},
};
//# sourceMappingURL=Prelu_grad.js.map
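/*
 * PReLU computes x for x > 0 and alpha * x otherwise, so the gradient above
 * passes dy through unchanged on the positive side and scales it by alpha
 * (w.r.t. x) or by x (w.r.t. alpha) on the negative side. A scalar sketch
 * (illustrative only):
 *
 * ```js
 * const dPreluDx = (x, alpha) => (x > 0 ? 1 : alpha);
 * const dPreluDAlpha = (x) => (x > 0 ? 0 : x);
 * dPreluDx(-2, 0.1); // 0.1
 * ```
 */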
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Relu6_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const relu6GradConfig = {
kernelName: kernel_names["jb" /* Relu6 */],
inputsToSave: ["x"],
gradFunc: (dy, saved) => {
const [x] = saved;
const mask = mul(lessEqual(x, 6), unary_ops_step(x));
return { x: () => mul(dy, cast(mask, "float32")) };
},
};
//# sourceMappingURL=Relu6_grad.js.map
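/*
 * relu6 clips activations to [0, 6], so its derivative above is 1 only on the
 * pass-through band and 0 wherever the clip is active. A scalar sketch of the
 * lessEqual(x, 6) * step(x) mask (illustrative only):
 *
 * ```js
 * const dRelu6 = (x) => (x > 0 && x <= 6 ? 1 : 0);
 * [-1, 3, 7].map(dRelu6); // [0, 1, 0]
 * ```
 */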
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Relu_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const reluGradConfig = {
kernelName: kernel_names["ib" /* Relu */],
inputsToSave: ["x"],
gradFunc: (dy, saved) => {
const [x] = saved;
return { x: () => mul(dy, cast(unary_ops_step(x), "float32")) };
},
};
//# sourceMappingURL=Relu_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const SELU_SCALEALPHA = 1.7580993408473768599402175208123;
const SELU_SCALE = 1.0507009873554804934193349852946;
//# sourceMappingURL=selu_util.js.map
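/*
 * These are the canonical constants from Klambauer et al., "Self-Normalizing
 * Neural Networks" (2017): selu(x) = scale * x for x > 0 and
 * scale * alpha * (exp(x) - 1) otherwise; SELU_SCALEALPHA is the precomputed
 * product scale * alpha. A sketch of the activation itself (illustrative
 * only):
 *
 * ```js
 * const selu = (x) =>
 *   x > 0 ? SELU_SCALE * x : SELU_SCALEALPHA * Math.expm1(x);
 * ```
 */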
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Selu_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const seluGradConfig = {
kernelName: kernel_names["lb" /* Selu */],
inputsToSave: ["x"],
gradFunc: (dy, saved) => {
const [x] = saved;
return {
x: () => {
const mask = greater(x, Object(tensor_ops["e" /* scalar */])(0));
const scaleAlpha = Object(tensor_ops["e" /* scalar */])(SELU_SCALEALPHA);
const scale = Object(tensor_ops["e" /* scalar */])(SELU_SCALE);
const greaterThanZeroDer = mul(dy, scale);
const lessEqualZeroDer = mul(mul(dy, scaleAlpha), unary_ops_exp(cast(x, "float32")));
return where(mask, greaterThanZeroDer, lessEqualZeroDer);
},
};
},
};
//# sourceMappingURL=Selu_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
* shape `blockShape + [batch]`, interleaves these blocks back into the grid
* defined by the spatial dimensions `[1, ..., M]`, to obtain a result with
* the same rank as the input. The spatial dimensions of this intermediate
* result are then optionally cropped according to `crops` to produce the
* output. This is the reverse of `tf.spaceToBatchND`. See below for a precise
* description.
*
* ```js
* const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]);
* const blockShape = [2, 2];
* const crops = [[0, 0], [0, 0]];
*
* x.batchToSpaceND(blockShape, crops).print();
* ```
*
* @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
* remainingShape`, where spatialShape has `M` dimensions.
* @param blockShape A 1-D array. Must have shape `[M]`, all values must
* be >= 1.
* @param crops A 2-D array. Must have shape `[M, 2]`, all values must be >= 0.
* `crops[i] = [cropStart, cropEnd]` specifies the amount to crop from input
* dimension `i + 1`, which corresponds to spatial dimension `i`. It is required
* that `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]`
*
* This operation is equivalent to the following steps:
*
* 1. Reshape `x` to `reshaped` of shape: `[blockShape[0], ...,
* blockShape[M-1], batch / prod(blockShape), x.shape[1], ...,
* x.shape[N-1]]`
*
 * 2. Permute dimensions of `reshaped` to produce `permuted` of shape `[batch /
 * prod(blockShape), x.shape[1], blockShape[0], ..., x.shape[M],
 * blockShape[M-1], x.shape[M+1], ..., x.shape[N-1]]`
 *
 * 3. Reshape `permuted` to produce `reshapedPermuted` of shape `[batch /
 * prod(blockShape), x.shape[1] * blockShape[0], ..., x.shape[M] *
 * blockShape[M-1], x.shape[M+1], ..., x.shape[N-1]]`
 *
 * 4. Crop the start and end of dimensions `[1, ..., M]` of `reshapedPermuted`
 * according to `crops` to produce the output of shape: `[batch /
 * prod(blockShape), x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1],
 * ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] -
 * crops[M-1,1], x.shape[M+1], ..., x.shape[N-1]]`
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function batchToSpaceND_(x, blockShape, crops) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "batchToSpaceND");
const prod = blockShape.reduce((a, b) => a * b);
util["assert"](
$x.rank >= 1 + blockShape.length,
() =>
`input rank is ${$x.rank} but should be greater than blockShape.length ${blockShape.length}`
);
util["assert"](
crops.length === blockShape.length,
() =>
`crops.length is ${crops.length} but should be equal to blockShape.length ${blockShape.length}`
);
util["assert"](
$x.shape[0] % prod === 0,
() =>
`input tensor batch is ${$x.shape[0]} but is not divisible by the product of ` +
`the elements of blockShape ${blockShape.join(" * ")} === ${prod}`
);
const forward = (backend) => {
return backend.batchToSpaceND($x, blockShape, crops);
};
const inputs = { x: $x };
const attrs = { blockShape, crops };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* gradient */,
kernel_names["i" /* BatchToSpaceND */],
attrs
);
}
const batchToSpaceND = Object(operation["a" /* op */])({ batchToSpaceND_ });
//# sourceMappingURL=batch_to_space_nd.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/SpaceToBatchND_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const spaceToBatchNDGradConfig = {
kernelName: kernel_names["mb" /* SpaceToBatchND */],
gradFunc: (dy, saved, attrs) => {
const { blockShape, paddings } = attrs;
return { x: () => batchToSpaceND(dy, blockShape, paddings) };
},
};
//# sourceMappingURL=SpaceToBatchND_grad.js.map
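/*
 * spaceToBatchND and batchToSpaceND are inverses for matching
 * blockShape/paddings, which is why the gradient above simply routes dy
 * through batchToSpaceND with the forward op's attributes. A usage sketch
 * (illustrative only):
 *
 * ```js
 * // forward:  y  = tf.spaceToBatchND(x, blockShape, paddings)
 * // backward: dx = tf.batchToSpaceND(dy, blockShape, paddings)
 * ```
 */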
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/SplitV_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const splitVGradConfig = {
kernelName: kernel_names["nb" /* SplitV */],
gradFunc: (dy, saved, attrs) => {
const { axis } = attrs;
return { x: () => concat(dy, axis) };
},
};
//# sourceMappingURL=SplitV_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Square_grad.js
/**
* @license
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const squareGradConfig = {
kernelName: kernel_names["ob" /* Square */],
inputsToSave: ["x"],
gradFunc: (dy, saved) => {
const [x] = saved;
return { x: () => mul(dy, mul(x.toFloat(), 2)) };
},
};
//# sourceMappingURL=Square_grad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/SquaredDifference_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const squaredDifferenceGradConfig = {
kernelName: kernel_names["pb" /* SquaredDifference */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const two = Object(tensor_ops["e" /* scalar */])(2);
const derA = () => mul(dy, mul(two, sub(a, b)));
const derB = () => mul(dy, mul(two, sub(b, a)));
return { a: derA, b: derB };
},
};
//# sourceMappingURL=SquaredDifference_grad.js.map
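/*
 * Scalar sketch of the SquaredDifference rule above (illustrative only): for
 * f = (a - b)^2, df/da = 2 * (a - b) and df/db = 2 * (b - a).
 *
 * ```js
 * const dSqDiffDa = (a, b) => 2 * (a - b);
 * const dSqDiffDb = (a, b) => 2 * (b - a);
 * dSqDiffDa(5, 3); // 4
 * ```
 */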
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Sub_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const subGradConfig = {
kernelName: kernel_names["qb" /* Sub */],
inputsToSave: ["a", "b"],
gradFunc: (dy, saved) => {
const [a, b] = saved;
const outShape = assertAndGetBroadcastShape(a.shape, b.shape);
const derA = () => {
let res = dy;
const reduceAxes = getReductionAxes(a.shape, outShape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(res, a.shape);
};
const derB = () => {
let res = dy;
const reduceAxes = getReductionAxes(b.shape, outShape);
if (reduceAxes.length > 0) {
res = sum(res, reduceAxes);
}
return reshape(neg(res), b.shape);
};
return { a: derA, b: derB };
},
};
//# sourceMappingURL=Sub_grad.js.map
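/*
 * The Sub gradient above routes +dy toward `a` and -dy toward `b`, with the
 * same broadcast reduction used by Multiply above. A shape-only sketch for a
 * [2, 3] - [3] subtraction (illustrative only):
 *
 * ```js
 * // der a: dy is already [2, 3], so the reshape is a no-op
 * // der b: neg(sum(dy, [0])) collapses axis 0 to recover shape [3]
 * ```
 */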
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Pads a `tf.Tensor` with a given value and paddings.
*
* This operation currently only implements the `CONSTANT` mode.
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that `paddings` is of given length.
* - `tf.pad1d`
* - `tf.pad2d`
* - `tf.pad3d`
* - `tf.pad4d`
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* x.pad([[1, 2]]).print();
* ```
* @param x The tensor to pad.
* @param paddings An array of length `R` (the rank of the tensor), where
* each element is a length-2 tuple of ints `[padBefore, padAfter]`,
* specifying how much to pad along each dimension of the tensor.
* @param constantValue The pad value to use. Defaults to 0.
*/
/** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function pad_(x, paddings, constantValue = 0) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "pad");
if ($x.rank === 0) {
throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");
}
const forward = (backend, save) => {
save([$x]);
return backend.pad($x, paddings, constantValue);
};
const attrs = { paddings, constantValue };
const inputs = { x: $x };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["db" /* PadV2 */],
attrs
);
}
const pad_pad = Object(operation["a" /* op */])({ pad_ });
//# sourceMappingURL=pad.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js
/**
* @license
* Copyright 2017 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function assertParamsValid(input, begin, size) {
util["assert"](
input.rank === begin.length,
() =>
`Error in slice${input.rank}D: Length of begin ${begin} must ` +
`match the rank of the array (${input.rank}).`
);
util["assert"](
input.rank === size.length,
() =>
`Error in slice${input.rank}D: Length of size ${size} must ` +
`match the rank of the array (${input.rank}).`
);
for (let i = 0; i < input.rank; ++i) {
util["assert"](
begin[i] + size[i] <= input.shape[i],
() =>
`Error in slice${input.rank}D: begin[${i}] + size[${i}] ` +
`(${begin[i] + size[i]}) would overflow input.shape[${i}] (${input.shape[i]})`
);
}
}
/** Converts a binary mask to an array of axes. Used in stridedSlice(). */
function maskToAxes(mask) {
const axes = [];
let axis = 0;
while (mask > 0) {
if (mask & 1) {
axes.push(axis);
}
mask = Math.floor(mask / 2); // integer halving; a fractional mask would spin the loop
axis++;
}
return axes;
}
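/*
 * Example (illustrative only): bits are read from the least significant end,
 * so a strided-slice mask of 0b101 selects axes 0 and 2.
 *
 * ```js
 * maskToAxes(5); // [0, 2]
 * ```
 */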
/** Computes the output shape given the strided slice params. */
function slice_util_computeOutShape(begin, end, strides) {
const size = [];
for (let axis = 0; axis < begin.length; axis++) {
size[axis] = Math.ceil((end[axis] - begin[axis]) / strides[axis]);
}
return size;
}
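/*
 * Worked example of the output-shape rule above, ceil((end - begin) / stride)
 * per axis (illustrative only):
 *
 * ```js
 * slice_util_computeOutShape([0, 1], [4, 5], [2, 2]); // [2, 2]
 * ```
 */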
// Creates full selection at the elided dimensions. If the dimension matches
// the ellipsis mask, override the current stride value. Otherwise, insert.
function stridesWithElidedDims(strides, ellipsisInsertionIndex, numElidedAxes) {
const newStrides = [...strides];
for (let i = 0; i < numElidedAxes; i++) {
if (i === 0) {
newStrides[ellipsisInsertionIndex] = 1;
} else {
newStrides.splice(
ellipsisInsertionIndex,
0 /* num elements to delete */,
1 /* element to add */
);
newStrides.pop();
}
}
return newStrides;
}
// Creates full selection at the elided dimensions. If the dimension matches
// the ellipsis mask, override the current start value. Otherwise, insert.
function startIndicesWithElidedDims(startIndices, ellipsisInsertionIndex, numElidedAxes) {
const newIndices = [...startIndices];
for (let i = 0; i < numElidedAxes; i++) {
if (i === 0) {
newIndices[ellipsisInsertionIndex] = 0;
} else {
newIndices.splice(
ellipsisInsertionIndex,
0 /* num elements to delete */,
0 /* element to add */
);
newIndices.pop();
}
}
return newIndices;
}
// Creates full selection at the elided dimensions. If the dimension matches
// the ellipsis mask, override the current stop value. Otherwise, insert.
function stopIndicesWithElidedDims(
stopIndices,
ellipsisInsertionIndex,
numElidedAxes,
inputShape
) {
const newIndices = [...stopIndices];
for (let i = 0; i < numElidedAxes; i++) {
if (i === 0) {
newIndices[ellipsisInsertionIndex] = Number.MAX_SAFE_INTEGER;
} else {
newIndices.splice(
ellipsisInsertionIndex,
0 /* num elements to delete */,
Number.MAX_SAFE_INTEGER /* element to add */
);
newIndices.pop();
}
}
for (let i = 0; i < newIndices.length; i++) {
newIndices[i] = util["clamp"](0, newIndices[i], inputShape[i]);
}
return newIndices;
}
function stridesForAxis(strides, axis, ellipsisMask) {
let stride = strides[axis];
if (ellipsisMask & (1 << axis) || stride == null) {
stride = 1;
}
return stride;
}
function startForAxis(beginMask, startIndices, strides, inputShape, axis, ellipsisMask) {
// Begin with the specified index
let start = startIndices[axis];
const stride = strides[axis] || 1;
// Use the default start when this axis's bit is set in beginMask or
// ellipsisMask, or when no begin index was provided for it.
if (beginMask & (1 << axis) || ellipsisMask & (1 << axis) || start == null) {
if (stride > 0) {
// Forward iteration - use the first element. These values will get
// clamped below (Note: We could have set them to 0 and axis_size-1, but
// use lowest() and max() to maintain symmetry with StopForAxis())
start = Number.MIN_SAFE_INTEGER;
} else {
// Backward iteration - use the last element.
start = Number.MAX_SAFE_INTEGER;
}
}
// Handle negative indices
const axisSize = inputShape[axis];
if (start < 0) {
start += axisSize;
}
// Clamping
start = util["clamp"](0, start, axisSize - 1);
return start;
}
function stopForAxis(endMask, stopIndices, strides, inputShape, axis, ellipsisMask) {
// Begin with the specified index
let stop = stopIndices[axis];
const stride = strides[axis] || 1;
// Use the default stop when this axis's bit is set in endMask or
// ellipsisMask, or when no stop index was provided for it.
if (endMask & (1 << axis) || ellipsisMask & (1 << axis) || stop == null) {
if (stride > 0) {
// Forward iteration - use the last element. These values will get
// clamped below
stop = Number.MAX_SAFE_INTEGER;
} else {
// Backward iteration - use the first element.
stop = Number.MIN_SAFE_INTEGER;
}
}
// Handle negative indices
const axisSize = inputShape[axis];
if (stop < 0) {
stop += axisSize;
}
// Clamping
// Because the end index points one past the last element, we need slightly
// different clamping ranges depending on the direction.
if (stride > 0) {
// Forward iteration
stop = util["clamp"](0, stop, axisSize);
} else {
// Backward iteration
stop = util["clamp"](-1, stop, axisSize - 1);
}
return stop;
}
/**
 * Returns true if the slice occupies a contiguous set of elements in the
* 'flat' space.
*/
function isSliceContinous(shape, begin, size) {
// Index of the first axis that has size > 1.
let firstNonOneAxis = size.length;
for (let i = 0; i < size.length; i++) {
if (size[i] > 1) {
firstNonOneAxis = i;
break;
}
}
for (let i = firstNonOneAxis + 1; i < size.length; i++) {
if (begin[i] > 0 || size[i] !== shape[i]) {
return false;
}
}
return true;
}
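/*
 * Examples (illustrative only): taking a row suffix of a [2, 3] tensor is one
 * contiguous run of flat elements, while taking a column is not.
 *
 * ```js
 * isSliceContinous([2, 3], [0, 1], [1, 2]); // true
 * isSliceContinous([2, 3], [0, 0], [2, 1]); // false -- one run per row
 * ```
 */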
function computeFlatOffset(begin, strides) {
let flatOffset = begin.length > 0 ? begin[begin.length - 1] : 1;
for (let i = 0; i < begin.length - 1; i++) {
flatOffset += begin[i] * strides[i];
}
return flatOffset;
}
//# sourceMappingURL=slice_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Extracts a 1D slice from 1D array starting at coordinates `begin` and is
* of length `size`. See `slice` for details.
*/
function slice1d_(x, begin, size) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice1d");
util["assert"](
$x.rank === 1,
() => `slice1d expects a rank-1 tensor, but got a rank-${$x.rank} tensor`
);
return slice($x, [begin], [size]);
}
/**
* Extracts a 2D slice from a 2D array starting at coordinates `begin` and
* is of size `size`. See `slice` for details.
*/
function slice2d_(x, begin, size) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice2d");
util["assert"](
$x.rank === 2,
() => `slice2d expects a rank-2 tensor, but got a rank-${$x.rank} tensor`
);
return slice($x, begin, size);
}
/**
* Extracts a 3D slice from a 3D array starting at coordinates `begin` and
* is of size `size`. See `slice` for details.
*/
function slice3d_(x, begin, size) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice3d");
util["assert"](
$x.rank === 3,
() => `slice3d expects a rank-3 tensor, but got a rank-${$x.rank} tensor`
);
return slice($x, begin, size);
}
/**
* Extracts a 4D slice from a 4D array starting at coordinates `begin` and
* is of size `size`. See `slice` for details.
*/
function slice4d_(x, begin, size) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice4d");
util["assert"](
$x.rank === 4,
() => `slice4d expects a rank-4 tensor, but got a rank-${$x.rank} tensor`
);
return slice($x, begin, size);
}
/**
* Extracts a slice from a `tf.Tensor` starting at coordinates `begin`
* and is of size `size`.
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that `x` is of the given rank:
* - `tf.slice1d`
* - `tf.slice2d`
* - `tf.slice3d`
* - `tf.slice4d`
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
*
* x.slice([1], [2]).print();
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* x.slice([1, 0], [1, 2]).print();
* ```
* @param x The input `tf.Tensor` to slice from.
* @param begin The coordinates to start the slice from. The length can be
* less than the rank of x - the rest of the axes will have implicit 0 as
* start. Can also be a single number, in which case it specifies the
* first axis.
* @param size The size of the slice. The length can be less than the rank of
* x - the rest of the axes will have implicit -1. A value of -1 requests
* the rest of the dimensions in the axis. Can also be a single number,
* in which case it specifies the size of the first axis.
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
function slice_(x, begin, size) {
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, "x", "slice");
if ($x.rank === 0) {
throw new Error("Slicing scalar is not possible");
}
// The following logic allows for more ergonomic calls.
let begin_;
if (typeof begin === "number") {
begin_ = [begin, ...new Array($x.rank - 1).fill(0)];
} else if (begin.length < $x.rank) {
begin_ = begin.concat(new Array($x.rank - begin.length).fill(0));
} else {
begin_ = begin.slice();
}
begin_.forEach((d) => {
util["assert"](d !== -1, () => "slice() does not support negative begin indexing.");
});
let size_;
if (size == null) {
size_ = new Array($x.rank).fill(-1);
} else if (typeof size === "number") {
size_ = [size, ...new Array($x.rank - 1).fill(-1)];
} else if (size.length < $x.rank) {
size_ = size.concat(new Array($x.rank - size.length).fill(-1));
} else {
size_ = size;
}
size_ = size_.map((d, i) => {
if (d >= 0) {
return d;
} else {
util["assert"](
d === -1,
() =>
`Negative size values should be exactly -1 but got ` +
`${d} for the slice() size at index ${i}.`
);
return $x.shape[i] - begin_[i];
}
});
assertParamsValid($x, begin_, size_);
const inputShape = $x.shape;
const grad = (dy) => {
// Create an Nx2 padding where the first column represents how many
// zeros are prepended (at start) for each dimension, and the second
// column indicates how many zeros are appended (at end).
// The number of zeros to append is the input shape minus both the
// begin vector and the size vector, elementwise.
const paddings = [];
for (let i = 0; i < dy.rank; i++) {
paddings.push([begin_[i], inputShape[i] - begin_[i] - size_[i]]);
}
return { x: () => pad_pad(dy, paddings) };
};
const attrs = { begin: begin_, size: size_ };
return engine["a" /* ENGINE */].runKernelFunc(
(backend) => backend.slice($x, begin_, size_),
{ x: $x },
grad,
"Slice",
attrs
);
}
const slice = Object(operation["a" /* op */])({ slice_ });
const slice1d = Object(operation["a" /* op */])({ slice1d_ });
const slice2d = Object(operation["a" /* op */])({ slice2d_ });
const slice3d = Object(operation["a" /* op */])({ slice3d_ });
const slice4d = Object(operation["a" /* op */])({ slice4d_ });
//# sourceMappingURL=slice.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Tile_grad.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const tileGradConfig = {
kernelName: kernel_names["rb" /* Tile */],
inputsToSave: ["x"],
gradFunc: (dy, saved, attrs) => {
const [x] = saved;
const { reps } = attrs;
const derX = () => {
let xGrad = Object(tensor_ops["o" /* zerosLike */])(x);
// TODO(cais): Maybe reduce memory footprint by avoiding repeated
// slicing.
if (x.rank === 1) {
for (let i = 0; i < reps[0]; ++i) {
xGrad = add(xGrad, slice(dy, [i * x.shape[0]], [x.shape[0]]));
}
} else if (x.rank === 2) {
for (let i = 0; i < reps[0]; ++i) {
for (let j = 0; j < reps[1]; ++j) {
xGrad = add(
xGrad,
slice(dy, [i * x.shape[0], j * x.shape[1]], [x.shape[0], x.shape[1]])
);
}
}
} else if (x.rank === 3) {
for (let i = 0; i < reps[0]; ++i) {
for (let j = 0; j < reps[1]; ++j) {
for (let k = 0; k < reps[2]; ++k) {
xGrad = add(
xGrad,
slice(
dy,
[i * x.shape[0], j * x.shape[1], k * x.shape[2]],
[x.shape[0], x.shape[1], x.shape[2]]
)
);
}
}
}
} else if (x.rank === 4) {
for (let i = 0; i < reps[0]; ++i) {
for (let j = 0; j < reps[1]; ++j) {
for (let k = 0; k < reps[2]; ++k) {
for (let l = 0; l < reps[3]; ++l) {
xGrad = add(
xGrad,
slice(
dy,
[i * x.shape[0], j * x.shape[1], k * x.shape[2], l * x.shape[3]],
[x.shape[0], x.shape[1], x.shape[2], x.shape[3]]
)
);
}
}
}
}
} else {
throw new Error(
`Gradient for tile operation is not implemented for rank-` +
`${x.rank} tensors yet.`
);
}
return xGrad;
};
return { x: derX };
},
};
//# sourceMappingURL=Tile_grad.js.map
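/*
 * The Tile gradient above accumulates each repetition's window of dy back
 * onto a zero tensor shaped like the input. Rank-1 sketch (illustrative
 * only):
 *
 * ```js
 * // x shape [2], reps [3] -> dy shape [6];
 * // xGrad = dy[0:2] + dy[2:4] + dy[4:6], one slice per repetition.
 * ```
 */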
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients/Transpose_grad.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const transposeGradConfig = {
kernelName: kernel_names["sb" /* Transpose */],
gradFunc: (dy, saved, attrs) => {
const transposeAttrs = attrs;
const { perm } = transposeAttrs;
const undoPerm = getUndoAxesPermutation(perm);
return { x: () => transpose(dy, undoPerm) };
},
};
//# sourceMappingURL=Transpose_grad.js.map
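/*
 * Transposing permutes axes, so its gradient transposes dy by the inverse
 * permutation. A sketch of the inversion that getUndoAxesPermutation performs
 * (illustrative only):
 *
 * ```js
 * const undoPerm = (perm) => {
 *   const undo = new Array(perm.length);
 *   perm.forEach((dest, src) => (undo[dest] = src));
 *   return undo;
 * };
 * undoPerm([2, 0, 1]); // [1, 2, 0]
 * ```
 */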
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js
var kernel_registry = __webpack_require__(17);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/register_all_gradients.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Export all kernel configs here so that the package can auto register them
const gradConfigs = [
addGradConfig,
addNGradConfig,
atan2GradConfig,
avgPoolGradConfig,
avgPool3DGradConfig,
batchMatMulGradConfig,
batchToSpaceNDGradConfig,
broadcastToGradConfig,
concatGradConfig,
conv2DGradConfig,
conv2DBackpropInputGradConfig,
conv3DGradConfig,
cumsumGradConfig,
depthwiseConv2dNativeGradConfig,
divGradConfig,
eluGradConfig,
floorDivGradConfig,
fusedBatchNormGradConfig,
greaterEqualGradConfig,
identityGradConfig,
lrnGradConfig,
maxGradConfig,
maximumGradConfig,
maxPoolGradConfig,
maxPool3DGradConfig,
minimumGradConfig,
modGradConfig,
multiplyGradConfig,
oneHotGradConfig,
padV2GradConfig,
powGradConfig,
preluGradConfig,
reluGradConfig,
relu6GradConfig,
seluGradConfig,
spaceToBatchNDGradConfig,
splitVGradConfig,
squareGradConfig,
squaredDifferenceGradConfig,
tileGradConfig,
transposeGradConfig,
subGradConfig,
];
for (const gradientConfig of gradConfigs) {
Object(kernel_registry["d" /* registerGradient */])(gradientConfig);
}
//# sourceMappingURL=register_all_gradients.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/environment.js
var environment = __webpack_require__(10);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
class PlatformBrowser {
fetch(path, init) {
return fetch(path, init);
}
now() {
return performance.now();
}
encode(text, encoding) {
if (encoding !== "utf-8" && encoding !== "utf8") {
throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);
}
if (this.textEncoder == null) {
this.textEncoder = new TextEncoder();
}
return this.textEncoder.encode(text);
}
decode(bytes, encoding) {
return new TextDecoder(encoding).decode(bytes);
}
}
if (Object(environment["c" /* env */])().get("IS_BROWSER")) {
Object(environment["c" /* env */])().setPlatform("browser", new PlatformBrowser());
}
//# sourceMappingURL=platform_browser.js.map
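/*
 * Usage sketch (illustrative only): once registered, the platform supplies
 * the fetch, timer, and text codecs the rest of tfjs-core relies on.
 *
 * ```js
 * const p = new PlatformBrowser();
 * p.decode(p.encode('hi', 'utf-8'), 'utf-8'); // 'hi'
 * ```
 */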
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js
var platform_node = __webpack_require__(62);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js
var io_utils = __webpack_require__(13);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
class IORouterRegistry {
constructor() {
this.saveRouters = [];
this.loadRouters = [];
}
static getInstance() {
if (IORouterRegistry.instance == null) {
IORouterRegistry.instance = new IORouterRegistry();
}
return IORouterRegistry.instance;
}
/**
* Register a save-handler router.
*
* @param saveRouter A function that maps a URL-like string onto an instance
* of `IOHandler` with the `save` method defined or `null`.
*/
static registerSaveRouter(saveRouter) {
IORouterRegistry.getInstance().saveRouters.push(saveRouter);
}
/**
* Register a load-handler router.
*
* @param loadRouter A function that maps a URL-like string onto an instance
* of `IOHandler` with the `load` method defined or `null`.
*/
static registerLoadRouter(loadRouter) {
IORouterRegistry.getInstance().loadRouters.push(loadRouter);
}
/**
* Look up IOHandler for saving, given a URL-like string.
*
* @param url
 * @returns All valid handlers for `url` with the `save` method defined,
 * given the currently registered handler routers.
*/
static getSaveHandlers(url) {
return IORouterRegistry.getHandlers(url, "save");
}
/**
* Look up IOHandler for loading, given a URL-like string.
*
* @param url
* @param loadOptions Optional, custom load options.
* @returns All valid handlers for `url`, given the currently registered
* handler routers.
*/
static getLoadHandlers(url, loadOptions) {
return IORouterRegistry.getHandlers(url, "load", loadOptions);
}
static getHandlers(url, handlerType, loadOptions) {
const validHandlers = [];
const routers =
handlerType === "load"
? IORouterRegistry.getInstance().loadRouters
: IORouterRegistry.getInstance().saveRouters;
routers.forEach((router) => {
const handler = router(url, loadOptions);
if (handler !== null) {
validHandlers.push(handler);
}
});
return validHandlers;
}
}
const registerSaveRouter = (saveRouter) => IORouterRegistry.registerSaveRouter(saveRouter);
const registerLoadRouter = (loadRouter) => IORouterRegistry.registerLoadRouter(loadRouter);
const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);
const getLoadHandlers = (url, loadOptions) =>
IORouterRegistry.getLoadHandlers(url, loadOptions);
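/**
 * A hedged sketch of registering a custom router; the 'myproto://' scheme and
 * the `myProtoIOHandler` factory are hypothetical:
 *
 * ```js
 * registerLoadRouter((url) => {
 *   if (typeof url === 'string' && url.startsWith('myproto://')) {
 *     return myProtoIOHandler(url.slice('myproto://'.length));
 *   }
 *   return null; // Not this router's scheme; let other routers match.
 * });
 * // getLoadHandlers('myproto://model-1') would then include the handler.
 * ```
 */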
//# sourceMappingURL=router_registry.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/model_management.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Classes and functions for model management across multiple storage mediums.
*
* Supported client actions:
* - Listing models on all registered storage mediums.
* - Remove model by URL from any registered storage mediums, by using URL
* string.
* - Moving or copying model from one path to another in the same medium or from
* one medium to another, by using URL strings.
*/
const URL_SCHEME_SUFFIX = "://";
class model_management_ModelStoreManagerRegistry {
constructor() {
this.managers = {};
}
static getInstance() {
if (model_management_ModelStoreManagerRegistry.instance == null) {
model_management_ModelStoreManagerRegistry.instance =
new model_management_ModelStoreManagerRegistry();
}
return model_management_ModelStoreManagerRegistry.instance;
}
  /**
   * Register a model store manager for a storage-medium scheme.
   *
   * @param scheme The scheme under which the manager is registered, e.g.,
   * 'localstorage'. A trailing '://' suffix, if present, is stripped.
   * @param manager An instance of `ModelStoreManager` that handles the scheme.
   */
static registerManager(scheme, manager) {
Object(util["assert"])(scheme != null, () => "scheme must not be undefined or null.");
if (scheme.endsWith(URL_SCHEME_SUFFIX)) {
scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));
}
Object(util["assert"])(scheme.length > 0, () => "scheme must not be an empty string.");
const registry = model_management_ModelStoreManagerRegistry.getInstance();
Object(util["assert"])(
registry.managers[scheme] == null,
() => `A model store manager is already registered for scheme '${scheme}'.`
);
registry.managers[scheme] = manager;
}
static getManager(scheme) {
const manager = this.getInstance().managers[scheme];
if (manager == null) {
throw new Error(`Cannot find model manager for scheme '${scheme}'`);
}
return manager;
}
static getSchemes() {
return Object.keys(this.getInstance().managers);
}
}
/**
* Helper method for parsing a URL string into a scheme and a path.
*
* @param url E.g., 'localstorage://my-model'
* @returns A dictionary with two fields: scheme and path.
* Scheme: e.g., 'localstorage' in the example above.
* Path: e.g., 'my-model' in the example above.
*/
function parseURL(url) {
if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {
throw new Error(
`The url string provided does not contain a scheme. ` +
`Supported schemes are: ` +
`${model_management_ModelStoreManagerRegistry.getSchemes().join(",")}`
);
}
return {
scheme: url.split(URL_SCHEME_SUFFIX)[0],
path: url.split(URL_SCHEME_SUFFIX)[1],
};
}
async function cloneModelInternal(sourceURL, destURL, deleteSource = false) {
Object(util["assert"])(
sourceURL !== destURL,
() => `Old path and new path are the same: '${sourceURL}'`
);
const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);
Object(util["assert"])(
loadHandlers.length > 0,
() => `Copying failed because no load handler is found for source URL ${sourceURL}.`
);
Object(util["assert"])(
loadHandlers.length < 2,
() =>
`Copying failed because more than one (${loadHandlers.length}) ` +
`load handlers for source URL ${sourceURL}.`
);
const loadHandler = loadHandlers[0];
const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);
Object(util["assert"])(
saveHandlers.length > 0,
() =>
`Copying failed because no save handler is found for destination ` + `URL ${destURL}.`
);
Object(util["assert"])(
saveHandlers.length < 2,
() =>
`Copying failed because more than one (${loadHandlers.length}) ` +
`save handlers for destination URL ${destURL}.`
);
const saveHandler = saveHandlers[0];
const sourceScheme = parseURL(sourceURL).scheme;
const sourcePath = parseURL(sourceURL).path;
  const sameMedium = sourceScheme === parseURL(destURL).scheme;
const modelArtifacts = await loadHandler.load();
// If moving within the same storage medium, remove the old model as soon as
// the loading is done. Without doing this, it is possible that the combined
// size of the two models will cause the cloning to fail.
if (deleteSource && sameMedium) {
await model_management_ModelStoreManagerRegistry
.getManager(sourceScheme)
.removeModel(sourcePath);
}
const saveResult = await saveHandler.save(modelArtifacts);
// If moving between mediums, the deletion is done after the save succeeds.
// This guards against the case in which saving to the destination medium
// fails.
if (deleteSource && !sameMedium) {
await model_management_ModelStoreManagerRegistry
.getManager(sourceScheme)
.removeModel(sourcePath);
}
return saveResult.modelArtifactsInfo;
}
/**
* List all models stored in registered storage mediums.
*
* For a web browser environment, the registered mediums are Local Storage and
* IndexedDB.
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Delete the model.
* await tf.io.removeModel('localstorage://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
* ```
*
* @returns A `Promise` of a dictionary mapping URLs of existing models to
* their model artifacts info. URLs include medium-specific schemes, e.g.,
* 'indexeddb://my/model/1'. Model artifacts info include type of the
* model's topology, byte sizes of the topology, weights, etc.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
async function listModels() {
const schemes = model_management_ModelStoreManagerRegistry.getSchemes();
const out = {};
for (const scheme of schemes) {
const schemeOut = await model_management_ModelStoreManagerRegistry
.getManager(scheme)
.listModels();
for (const path in schemeOut) {
const url = scheme + URL_SCHEME_SUFFIX + path;
out[url] = schemeOut[path];
}
}
return out;
}
/**
 * Remove a model specified by URL from a registered storage medium.
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Delete the model.
* await tf.io.removeModel('localstorage://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
* ```
*
* @param url A URL to a stored model, with a scheme prefix, e.g.,
* 'localstorage://my-model-1', 'indexeddb://my/model/2'.
* @returns ModelArtifactsInfo of the deleted model (if and only if deletion
* is successful).
 * @throws Error if deletion fails, e.g., if no model exists at `url`.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
async function removeModel(url) {
const schemeAndPath = parseURL(url);
const manager = model_management_ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);
return manager.removeModel(schemeAndPath.path);
}
/**
* Copy a model from one URL to another.
*
* This function supports:
*
* 1. Copying within a storage medium, e.g.,
* `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`
* 2. Copying between two storage mediums, e.g.,
* `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Copy the model, from Local Storage to IndexedDB.
* await tf.io.copyModel(
* 'localstorage://demo/management/model1',
* 'indexeddb://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Remove both models.
* await tf.io.removeModel('localstorage://demo/management/model1');
* await tf.io.removeModel('indexeddb://demo/management/model1');
* ```
*
* @param sourceURL Source URL of copying.
* @param destURL Destination URL of copying.
* @returns ModelArtifactsInfo of the copied model (if and only if copying
* is successful).
 * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or
 * if `sourceURL` and `destURL` are identical.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
async function copyModel(sourceURL, destURL) {
const deleteSource = false;
return cloneModelInternal(sourceURL, destURL, deleteSource);
}
/**
* Move a model from one URL to another.
*
* This function supports:
*
* 1. Moving within a storage medium, e.g.,
* `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`
* 2. Moving between two storage mediums, e.g.,
* `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Move the model, from Local Storage to IndexedDB.
* await tf.io.moveModel(
* 'localstorage://demo/management/model1',
* 'indexeddb://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Remove the moved model.
* await tf.io.removeModel('indexeddb://demo/management/model1');
* ```
*
* @param sourceURL Source URL of moving.
* @param destURL Destination URL of moving.
 * @returns ModelArtifactsInfo of the moved model (if and only if the move
 * is successful).
 * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or
 * if `sourceURL` and `destURL` are identical.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
async function moveModel(sourceURL, destURL) {
const deleteSource = true;
return cloneModelInternal(sourceURL, destURL, deleteSource);
}
//# sourceMappingURL=model_management.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const DATABASE_NAME = "tensorflowjs";
const DATABASE_VERSION = 1;
// Model data and ModelArtifactsInfo (metadata) are stored in two separate
// stores for efficient access of the list of stored models and their metadata.
// 1. The object store for model data: topology, weights and weight manifests.
const MODEL_STORE_NAME = "models_store";
// 2. The object store for ModelArtifactsInfo, including meta-information such
// as the type of topology (JSON vs binary), byte size of the topology, byte
// size of the weights, etc.
const INFO_STORE_NAME = "model_info_store";
/**
* Delete the entire database for tensorflow.js, including the models store.
*/
async function deleteDatabase() {
const idbFactory = getIndexedDBFactory();
return new Promise((resolve, reject) => {
const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);
deleteRequest.onsuccess = () => resolve();
deleteRequest.onerror = (error) => reject(error);
});
}
function getIndexedDBFactory() {
if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
// TODO(cais): Add more info about what IOHandler subtypes are available.
// Maybe point to a doc page on the web and/or automatically determine
// the available IOHandlers and print them in the error message.
throw new Error(
"Failed to obtain IndexedDB factory because the current environment" +
"is not a web browser."
);
}
// tslint:disable-next-line:no-any
const theWindow = typeof window === "undefined" ? self : window;
const factory =
theWindow.indexedDB ||
theWindow.mozIndexedDB ||
theWindow.webkitIndexedDB ||
theWindow.msIndexedDB ||
theWindow.shimIndexedDB;
if (factory == null) {
throw new Error("The current browser does not appear to support IndexedDB.");
}
return factory;
}
function setUpDatabase(openRequest) {
const db = openRequest.result;
db.createObjectStore(MODEL_STORE_NAME, { keyPath: "modelPath" });
db.createObjectStore(INFO_STORE_NAME, { keyPath: "modelPath" });
}
/**
* IOHandler subclass: Browser IndexedDB.
*
* See the doc string of `browserIndexedDB` for more details.
*/
class indexed_db_BrowserIndexedDB {
constructor(modelPath) {
this.indexedDB = getIndexedDBFactory();
if (modelPath == null || !modelPath) {
throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");
}
this.modelPath = modelPath;
}
async save(modelArtifacts) {
// TODO(cais): Support saving GraphDef models.
if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
throw new Error(
"BrowserLocalStorage.save() does not support saving model topology " +
"in binary formats yet."
);
}
return this.databaseAction(this.modelPath, modelArtifacts);
}
async load() {
return this.databaseAction(this.modelPath);
}
/**
* Perform database action to put model artifacts into or read model artifacts
* from IndexedDB object store.
*
* Whether the action is put or get depends on whether `modelArtifacts` is
* specified. If it is specified, the action will be put; otherwise the action
* will be get.
*
* @param modelPath A unique string path for the model.
* @param modelArtifacts If specified, it will be the model artifacts to be
* stored in IndexedDB.
* @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`
* of `ModelArtifacts`, if the action is get.
*/
databaseAction(modelPath, modelArtifacts) {
return new Promise((resolve, reject) => {
const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
openRequest.onsuccess = () => {
const db = openRequest.result;
if (modelArtifacts == null) {
// Read model out from object store.
const modelTx = db.transaction(MODEL_STORE_NAME, "readonly");
const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
const getRequest = modelStore.get(this.modelPath);
getRequest.onsuccess = () => {
if (getRequest.result == null) {
db.close();
return reject(
new Error(
`Cannot find model with path '${this.modelPath}' ` + `in IndexedDB.`
)
);
} else {
resolve(getRequest.result.modelArtifacts);
}
};
getRequest.onerror = (error) => {
db.close();
return reject(getRequest.error);
};
modelTx.oncomplete = () => db.close();
} else {
// Put model into object store.
const modelArtifactsInfo = Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
modelArtifacts
);
// First, put ModelArtifactsInfo into info store.
const infoTx = db.transaction(INFO_STORE_NAME, "readwrite");
let infoStore = infoTx.objectStore(INFO_STORE_NAME);
const putInfoRequest = infoStore.put({
modelPath: this.modelPath,
modelArtifactsInfo,
});
let modelTx;
putInfoRequest.onsuccess = () => {
// Second, put model data into model store.
modelTx = db.transaction(MODEL_STORE_NAME, "readwrite");
const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
const putModelRequest = modelStore.put({
modelPath: this.modelPath,
modelArtifacts,
modelArtifactsInfo,
});
putModelRequest.onsuccess = () => resolve({ modelArtifactsInfo });
putModelRequest.onerror = (error) => {
// If the put-model request fails, roll back the info entry as
// well.
infoStore = infoTx.objectStore(INFO_STORE_NAME);
const deleteInfoRequest = infoStore.delete(this.modelPath);
deleteInfoRequest.onsuccess = () => {
db.close();
return reject(putModelRequest.error);
};
deleteInfoRequest.onerror = (error) => {
db.close();
return reject(putModelRequest.error);
};
};
};
putInfoRequest.onerror = (error) => {
db.close();
return reject(putInfoRequest.error);
};
infoTx.oncomplete = () => {
if (modelTx == null) {
db.close();
} else {
modelTx.oncomplete = () => db.close();
}
};
}
};
openRequest.onerror = (error) => reject(openRequest.error);
});
}
}
indexed_db_BrowserIndexedDB.URL_SCHEME = "indexeddb://";
const indexedDBRouter = (url) => {
if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
return null;
} else {
if (!Array.isArray(url) && url.startsWith(indexed_db_BrowserIndexedDB.URL_SCHEME)) {
return browserIndexedDB(url.slice(indexed_db_BrowserIndexedDB.URL_SCHEME.length));
} else {
return null;
}
}
};
IORouterRegistry.registerSaveRouter(indexedDBRouter);
IORouterRegistry.registerLoadRouter(indexedDBRouter);
/**
* Creates a browser IndexedDB IOHandler for saving and loading models.
*
* ```js
* const model = tf.sequential();
* model.add(
* tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
*
 * const saveResult = await model.save('indexeddb://MyModel');
* console.log(saveResult);
* ```
*
* @param modelPath A unique identifier for the model to be saved. Must be a
* non-empty string.
 * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),
* which can be used with, e.g., `tf.Model.save`.
*/
function browserIndexedDB(modelPath) {
return new indexed_db_BrowserIndexedDB(modelPath);
}
function maybeStripScheme(key) {
return key.startsWith(indexed_db_BrowserIndexedDB.URL_SCHEME)
? key.slice(indexed_db_BrowserIndexedDB.URL_SCHEME.length)
: key;
}
class BrowserIndexedDBManager {
constructor() {
this.indexedDB = getIndexedDBFactory();
}
async listModels() {
return new Promise((resolve, reject) => {
const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
openRequest.onsuccess = () => {
const db = openRequest.result;
const tx = db.transaction(INFO_STORE_NAME, "readonly");
const store = tx.objectStore(INFO_STORE_NAME);
// tslint:disable:max-line-length
// Need to cast `store` as `any` here because TypeScript's DOM
// library does not have the `getAll()` method even though the
// method is supported in the latest version of most mainstream
// browsers:
// https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll
// tslint:enable:max-line-length
// tslint:disable-next-line:no-any
const getAllInfoRequest = store.getAll();
getAllInfoRequest.onsuccess = () => {
const out = {};
for (const item of getAllInfoRequest.result) {
out[item.modelPath] = item.modelArtifactsInfo;
}
resolve(out);
};
getAllInfoRequest.onerror = (error) => {
db.close();
return reject(getAllInfoRequest.error);
};
tx.oncomplete = () => db.close();
};
openRequest.onerror = (error) => reject(openRequest.error);
});
}
async removeModel(path) {
path = maybeStripScheme(path);
return new Promise((resolve, reject) => {
const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
openRequest.onsuccess = () => {
const db = openRequest.result;
const infoTx = db.transaction(INFO_STORE_NAME, "readwrite");
const infoStore = infoTx.objectStore(INFO_STORE_NAME);
const getInfoRequest = infoStore.get(path);
let modelTx;
getInfoRequest.onsuccess = () => {
if (getInfoRequest.result == null) {
db.close();
return reject(
new Error(`Cannot find model with path '${path}' ` + `in IndexedDB.`)
);
} else {
// First, delete the entry in the info store.
const deleteInfoRequest = infoStore.delete(path);
const deleteModelData = () => {
// Second, delete the entry in the model store.
modelTx = db.transaction(MODEL_STORE_NAME, "readwrite");
const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
const deleteModelRequest = modelStore.delete(path);
deleteModelRequest.onsuccess = () =>
resolve(getInfoRequest.result.modelArtifactsInfo);
                deleteModelRequest.onerror = (error) => reject(deleteModelRequest.error);
};
// Proceed with deleting model data regardless of whether deletion
// of info data succeeds or not.
deleteInfoRequest.onsuccess = deleteModelData;
              deleteInfoRequest.onerror = (error) => {
                deleteModelData();
                db.close();
                return reject(deleteInfoRequest.error);
              };
}
};
getInfoRequest.onerror = (error) => {
db.close();
return reject(getInfoRequest.error);
};
infoTx.oncomplete = () => {
if (modelTx == null) {
db.close();
} else {
modelTx.oncomplete = () => db.close();
}
};
};
openRequest.onerror = (error) => reject(openRequest.error);
});
}
}
if (Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
  // Wrap the construction and registration, to guard against browsers that
  // don't support IndexedDB.
try {
model_management_ModelStoreManagerRegistry.registerManager(
indexed_db_BrowserIndexedDB.URL_SCHEME,
new BrowserIndexedDBManager()
);
} catch (err) {}
}
//# sourceMappingURL=indexed_db.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const PATH_SEPARATOR = "/";
const PATH_PREFIX = "tensorflowjs_models";
const INFO_SUFFIX = "info";
const MODEL_TOPOLOGY_SUFFIX = "model_topology";
const WEIGHT_SPECS_SUFFIX = "weight_specs";
const WEIGHT_DATA_SUFFIX = "weight_data";
const MODEL_METADATA_SUFFIX = "model_metadata";
/**
* Purge all tensorflow.js-saved model artifacts from local storage.
*
* @returns Paths of the models purged.
*/
function purgeLocalStorageArtifacts() {
if (
!Object(environment["c" /* env */])().getBool("IS_BROWSER") ||
typeof window === "undefined" ||
typeof window.localStorage === "undefined"
) {
throw new Error(
"purgeLocalStorageModels() cannot proceed because local storage is " +
"unavailable in the current environment."
);
}
const LS = window.localStorage;
  const purgedModelPaths = [];
  const prefix = PATH_PREFIX + PATH_SEPARATOR;
  // Collect the matching keys first: removing items while iterating over
  // localStorage by index shifts the remaining keys and would skip entries.
  const keysToRemove = [];
  for (let i = 0; i < LS.length; ++i) {
    const key = LS.key(i);
    if (key.startsWith(prefix) && key.length > prefix.length) {
      keysToRemove.push(key);
    }
  }
  for (const key of keysToRemove) {
    LS.removeItem(key);
    const modelName = getModelPathFromKey(key);
    if (purgedModelPaths.indexOf(modelName) === -1) {
      purgedModelPaths.push(modelName);
    }
  }
return purgedModelPaths;
}
function getModelKeys(path) {
return {
info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),
topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),
weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),
weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),
modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR),
};
}
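/**
 * A quick sketch of the resulting key layout for path 'my/model/1':
 *
 * ```js
 * getModelKeys('my/model/1').info;
 * // -> 'tensorflowjs_models/my/model/1/info'
 * getModelKeys('my/model/1').weightData;
 * // -> 'tensorflowjs_models/my/model/1/weight_data'
 * ```
 */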
/**
* Get model path from a local-storage key.
*
* E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'
*
* @param key
*/
function getModelPathFromKey(key) {
const items = key.split(PATH_SEPARATOR);
if (items.length < 3) {
throw new Error(`Invalid key format: ${key}`);
}
return items.slice(1, items.length - 1).join(PATH_SEPARATOR);
}
function local_storage_maybeStripScheme(key) {
return key.startsWith(local_storage_BrowserLocalStorage.URL_SCHEME)
? key.slice(local_storage_BrowserLocalStorage.URL_SCHEME.length)
: key;
}
/**
* IOHandler subclass: Browser Local Storage.
*
* See the doc string to `browserLocalStorage` for more details.
*/
class local_storage_BrowserLocalStorage {
constructor(modelPath) {
if (
!Object(environment["c" /* env */])().getBool("IS_BROWSER") ||
typeof window === "undefined" ||
typeof window.localStorage === "undefined"
) {
// TODO(cais): Add more info about what IOHandler subtypes are
// available.
// Maybe point to a doc page on the web and/or automatically determine
// the available IOHandlers and print them in the error message.
throw new Error("The current environment does not support local storage.");
}
this.LS = window.localStorage;
if (modelPath == null || !modelPath) {
throw new Error("For local storage, modelPath must not be null, undefined or empty.");
}
this.modelPath = modelPath;
this.keys = getModelKeys(this.modelPath);
}
/**
* Save model artifacts to browser local storage.
*
* See the documentation to `browserLocalStorage` for details on the saved
* artifacts.
*
* @param modelArtifacts The model artifacts to be stored.
* @returns An instance of SaveResult.
*/
async save(modelArtifacts) {
if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
throw new Error(
"BrowserLocalStorage.save() does not support saving model topology " +
"in binary formats yet."
);
} else {
const topology = JSON.stringify(modelArtifacts.modelTopology);
const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);
const modelArtifactsInfo = Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
modelArtifacts
);
try {
this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));
this.LS.setItem(this.keys.topology, topology);
this.LS.setItem(this.keys.weightSpecs, weightSpecs);
this.LS.setItem(
this.keys.weightData,
Object(io_utils["a" /* arrayBufferToBase64String */])(modelArtifacts.weightData)
);
this.LS.setItem(
this.keys.modelMetadata,
JSON.stringify({
format: modelArtifacts.format,
generatedBy: modelArtifacts.generatedBy,
convertedBy: modelArtifacts.convertedBy,
userDefinedMetadata: modelArtifacts.userDefinedMetadata,
})
);
return { modelArtifactsInfo };
} catch (err) {
// If saving failed, clean up all items saved so far.
this.LS.removeItem(this.keys.info);
this.LS.removeItem(this.keys.topology);
this.LS.removeItem(this.keys.weightSpecs);
this.LS.removeItem(this.keys.weightData);
this.LS.removeItem(this.keys.modelMetadata);
throw new Error(
`Failed to save model '${this.modelPath}' to local storage: ` +
`size quota being exceeded is a possible cause of this failure: ` +
`modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +
`weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +
`weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`
);
}
}
}
/**
* Load a model from local storage.
*
* See the documentation to `browserLocalStorage` for details on the saved
* artifacts.
*
* @returns The loaded model (if loading succeeds).
*/
async load() {
const info = JSON.parse(this.LS.getItem(this.keys.info));
if (info == null) {
throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);
}
if (info.modelTopologyType !== "JSON") {
throw new Error(
"BrowserLocalStorage does not support loading non-JSON model " + "topology yet."
);
}
const out = {};
// Load topology.
const topology = JSON.parse(this.LS.getItem(this.keys.topology));
if (topology == null) {
throw new Error(
`In local storage, the topology of model '${this.modelPath}' ` + `is missing.`
);
}
out.modelTopology = topology;
// Load weight specs.
const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));
if (weightSpecs == null) {
throw new Error(
`In local storage, the weight specs of model '${this.modelPath}' ` + `are missing.`
);
}
out.weightSpecs = weightSpecs;
// Load meta-data fields.
const metadataString = this.LS.getItem(this.keys.modelMetadata);
if (metadataString != null) {
const metadata = JSON.parse(metadataString);
out.format = metadata["format"];
out.generatedBy = metadata["generatedBy"];
out.convertedBy = metadata["convertedBy"];
out.userDefinedMetadata = metadata["userDefinedMetadata"];
}
// Load weight data.
const weightDataBase64 = this.LS.getItem(this.keys.weightData);
if (weightDataBase64 == null) {
throw new Error(
`In local storage, the binary weight values of model ` +
`'${this.modelPath}' are missing.`
);
}
out.weightData = Object(io_utils["b" /* base64StringToArrayBuffer */])(weightDataBase64);
return out;
}
}
local_storage_BrowserLocalStorage.URL_SCHEME = "localstorage://";
const localStorageRouter = (url) => {
if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
return null;
} else {
if (!Array.isArray(url) && url.startsWith(local_storage_BrowserLocalStorage.URL_SCHEME)) {
return browserLocalStorage(
url.slice(local_storage_BrowserLocalStorage.URL_SCHEME.length)
);
} else {
return null;
}
}
};
IORouterRegistry.registerSaveRouter(localStorageRouter);
IORouterRegistry.registerLoadRouter(localStorageRouter);
/**
* Factory function for local storage IOHandler.
*
* This `IOHandler` supports both `save` and `load`.
*
 * For each model's saved artifacts, five items are saved to local storage.
 * - `${PATH_PREFIX}/${modelPath}/info`: Contains meta-info about the
 * model, such as date saved, type of the topology, size in bytes, etc.
 * - `${PATH_PREFIX}/${modelPath}/model_topology`: Model topology. For Keras-
 * style models, this is a stringized JSON.
 * - `${PATH_PREFIX}/${modelPath}/weight_specs`: Weight specs of the
 * model, can be used to decode the saved binary weight values (see
 * item below).
 * - `${PATH_PREFIX}/${modelPath}/weight_data`: Concatenated binary
 * weight values, stored as a base64-encoded string.
 * - `${PATH_PREFIX}/${modelPath}/model_metadata`: Format, generator and
 * user-defined metadata of the model, when present.
*
 * Saving may throw an `Error` if the total size of the artifacts exceeds the
* browser-specific quota.
*
* @param modelPath A unique identifier for the model to be saved. Must be a
* non-empty string.
* @returns An instance of `IOHandler`, which can be used with, e.g.,
* `tf.Model.save`.
*/
function browserLocalStorage(modelPath) {
return new local_storage_BrowserLocalStorage(modelPath);
}
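/**
 * A usage sketch (hedged; assumes a browser where localStorage is available
 * and that the model fits within the storage quota):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 1, inputShape: [10]}));
 * const saveResult = await model.save('localstorage://my-model-1');
 * const loaded = await tf.loadLayersModel('localstorage://my-model-1');
 * ```
 */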
class local_storage_BrowserLocalStorageManager {
constructor() {
Object(util["assert"])(
Object(environment["c" /* env */])().getBool("IS_BROWSER"),
() => "Current environment is not a web browser"
);
Object(util["assert"])(
typeof window === "undefined" || typeof window.localStorage !== "undefined",
() => "Current browser does not appear to support localStorage"
);
this.LS = window.localStorage;
}
async listModels() {
const out = {};
const prefix = PATH_PREFIX + PATH_SEPARATOR;
const suffix = PATH_SEPARATOR + INFO_SUFFIX;
for (let i = 0; i < this.LS.length; ++i) {
const key = this.LS.key(i);
if (key.startsWith(prefix) && key.endsWith(suffix)) {
const modelPath = getModelPathFromKey(key);
out[modelPath] = JSON.parse(this.LS.getItem(key));
}
}
return out;
}
async removeModel(path) {
path = local_storage_maybeStripScheme(path);
const keys = getModelKeys(path);
if (this.LS.getItem(keys.info) == null) {
throw new Error(`Cannot find model at path '${path}'`);
}
const info = JSON.parse(this.LS.getItem(keys.info));
    this.LS.removeItem(keys.info);
    this.LS.removeItem(keys.topology);
    this.LS.removeItem(keys.weightSpecs);
    this.LS.removeItem(keys.weightData);
    // Also remove the metadata entry written by save(), so no orphan key is
    // left behind.
    this.LS.removeItem(keys.modelMetadata);
return info;
}
}
if (Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
// Wrap the construction and registration, to guard against browsers that
// don't support Local Storage.
try {
model_management_ModelStoreManagerRegistry.registerManager(
local_storage_BrowserLocalStorage.URL_SCHEME,
new local_storage_BrowserLocalStorageManager()
);
} catch (err) {}
}
//# sourceMappingURL=local_storage.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* IOHandlers related to files, such as browser-triggered file downloads,
* user-selected files in browser.
*/
const DEFAULT_FILE_NAME_PREFIX = "model";
const DEFAULT_JSON_EXTENSION_NAME = ".json";
const DEFAULT_WEIGHT_DATA_EXTENSION_NAME = ".weights.bin";
function defer(f) {
return new Promise((resolve) => setTimeout(resolve)).then(f);
}
class browser_files_BrowserDownloads {
constructor(fileNamePrefix) {
if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
// TODO(cais): Provide info on what IOHandlers are available under the
// current environment.
throw new Error(
"browserDownloads() cannot proceed because the current environment " +
"is not a browser."
);
}
    // Guard against null/undefined before calling startsWith; the default
    // prefix is applied below in that case.
    if (
      fileNamePrefix != null &&
      fileNamePrefix.startsWith(browser_files_BrowserDownloads.URL_SCHEME)
    ) {
      fileNamePrefix = fileNamePrefix.slice(browser_files_BrowserDownloads.URL_SCHEME.length);
    }
if (fileNamePrefix == null || fileNamePrefix.length === 0) {
fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;
}
this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;
this.weightDataFileName = fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;
}
async save(modelArtifacts) {
if (typeof document === "undefined") {
throw new Error(
"Browser downloads are not supported in " +
"this environment since `document` is not present"
);
}
const weightsURL = window.URL.createObjectURL(
new Blob([modelArtifacts.weightData], { type: "application/octet-stream" })
);
if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
throw new Error(
"BrowserDownloads.save() does not support saving model topology " +
"in binary formats yet."
);
} else {
const weightsManifest = [
{
paths: ["./" + this.weightDataFileName],
weights: modelArtifacts.weightSpecs,
},
];
const modelTopologyAndWeightManifest = {
modelTopology: modelArtifacts.modelTopology,
format: modelArtifacts.format,
generatedBy: modelArtifacts.generatedBy,
convertedBy: modelArtifacts.convertedBy,
weightsManifest,
};
const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(
new Blob([JSON.stringify(modelTopologyAndWeightManifest)], {
type: "application/json",
})
);
// If anchor elements are not provided, create them without attaching them
// to parents, so that the downloaded file names can be controlled.
const jsonAnchor =
this.jsonAnchor == null ? document.createElement("a") : this.jsonAnchor;
jsonAnchor.download = this.modelTopologyFileName;
jsonAnchor.href = modelTopologyAndWeightManifestURL;
      // Trigger downloads by dispatching a click event on the download anchors.
// When multiple downloads are started synchronously, Firefox will only
// save the last one.
await defer(() => jsonAnchor.dispatchEvent(new MouseEvent("click")));
if (modelArtifacts.weightData != null) {
const weightDataAnchor =
this.weightDataAnchor == null ? document.createElement("a") : this.weightDataAnchor;
weightDataAnchor.download = this.weightDataFileName;
weightDataAnchor.href = weightsURL;
await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent("click")));
}
return {
modelArtifactsInfo: Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
modelArtifacts
),
};
}
}
}
browser_files_BrowserDownloads.URL_SCHEME = "downloads://";
class browser_files_BrowserFiles {
constructor(files) {
if (files == null || files.length < 1) {
throw new Error(
`When calling browserFiles, at least 1 file is required, ` + `but received ${files}`
);
}
this.files = files;
}
async load() {
const jsonFile = this.files[0];
const weightFiles = this.files.slice(1);
return new Promise((resolve, reject) => {
const jsonReader = new FileReader();
jsonReader.onload = (event) => {
// tslint:disable-next-line:no-any
const modelJSON = JSON.parse(event.target.result);
const modelTopology = modelJSON.modelTopology;
if (modelTopology == null) {
reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));
return;
}
        if (weightFiles.length === 0) {
          resolve({ modelTopology });
          return;
        }
const weightsManifest = modelJSON.weightsManifest;
if (weightsManifest == null) {
          reject(new Error(`weightsManifest field is missing from file ${jsonFile.name}`));
return;
}
let pathToFile;
try {
pathToFile = this.checkManifestAndWeightFiles(weightsManifest, weightFiles);
} catch (err) {
reject(err);
return;
}
const weightSpecs = [];
const paths = [];
const perFileBuffers = [];
weightsManifest.forEach((weightsGroup) => {
weightsGroup.paths.forEach((path) => {
paths.push(path);
perFileBuffers.push(null);
});
weightSpecs.push(...weightsGroup.weights);
});
weightsManifest.forEach((weightsGroup) => {
weightsGroup.paths.forEach((path) => {
const weightFileReader = new FileReader();
weightFileReader.onload = (event) => {
// tslint:disable-next-line:no-any
const weightData = event.target.result;
const index = paths.indexOf(path);
perFileBuffers[index] = weightData;
if (perFileBuffers.indexOf(null) === -1) {
resolve({
modelTopology,
weightSpecs,
weightData: Object(io_utils["d" /* concatenateArrayBuffers */])(
perFileBuffers
),
format: modelJSON.format,
generatedBy: modelJSON.generatedBy,
convertedBy: modelJSON.convertedBy,
userDefinedMetadata: modelJSON.userDefinedMetadata,
});
}
};
              weightFileReader.onerror = (error) =>
                reject(new Error(`Failed to read weights data from file with path '${path}'.`));
weightFileReader.readAsArrayBuffer(pathToFile[path]);
});
});
};
      jsonReader.onerror = (error) =>
        reject(
          new Error(
            `Failed to read model topology and weights manifest JSON ` +
              `from file '${jsonFile.name}'. BrowserFiles supports loading ` +
              `Keras-style tf.Model artifacts only.`
          )
        );
jsonReader.readAsText(jsonFile);
});
}
/**
* Check the compatibility between weights manifest and weight files.
*/
checkManifestAndWeightFiles(manifest, files) {
const basenames = [];
const fileNames = files.map((file) => Object(io_utils["c" /* basename */])(file.name));
const pathToFile = {};
for (const group of manifest) {
group.paths.forEach((path) => {
const pathBasename = Object(io_utils["c" /* basename */])(path);
if (basenames.indexOf(pathBasename) !== -1) {
throw new Error(
`Duplicate file basename found in weights manifest: ` + `'${pathBasename}'`
);
}
basenames.push(pathBasename);
if (fileNames.indexOf(pathBasename) === -1) {
throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);
} else {
pathToFile[path] = files[fileNames.indexOf(pathBasename)];
}
});
}
if (basenames.length !== files.length) {
throw new Error(
`Mismatch in the number of files in weights manifest ` +
`(${basenames.length}) and the number of weight files provided ` +
`(${files.length}).`
);
}
return pathToFile;
}
}
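/**
 * For reference, a weights manifest passed through `load()` has roughly this
 * shape (a sketch; the names, shapes and paths are illustrative):
 *
 * ```js
 * const weightsManifest = [{
 *   paths: ['group1-shard1of1.bin'],
 *   weights: [{name: 'dense/kernel', shape: [10, 1], dtype: 'float32'}]
 * }];
 * ```
 */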
const browserDownloadsRouter = (url) => {
if (!Object(environment["c" /* env */])().getBool("IS_BROWSER")) {
return null;
} else {
if (!Array.isArray(url) && url.startsWith(browser_files_BrowserDownloads.URL_SCHEME)) {
return browserDownloads(url.slice(browser_files_BrowserDownloads.URL_SCHEME.length));
} else {
return null;
}
}
};
IORouterRegistry.registerSaveRouter(browserDownloadsRouter);
/**
* Creates an IOHandler that triggers file downloads from the browser.
*
 * The returned `IOHandler` instance can be used with model exporting methods
 * such as `tf.Model.save`; it supports saving only.
*
* ```js
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* const saveResult = await model.save('downloads://mymodel');
* // This will trigger downloading of two files:
* // 'mymodel.json' and 'mymodel.weights.bin'.
* console.log(saveResult);
* ```
*
* @param fileNamePrefix Prefix name of the files to be downloaded. For use with
* `tf.Model`, `fileNamePrefix` should follow either of the following two
* formats:
* 1. `null` or `undefined`, in which case the default file
* names will be used:
* - 'model.json' for the JSON file containing the model topology and
* weights manifest.
* - 'model.weights.bin' for the binary file containing the binary weight
* values.
* 2. A single string or an Array of a single string, as the file name prefix.
* For example, if `'foo'` is provided, the downloaded JSON
* file and binary weights file will be named 'foo.json' and
* 'foo.weights.bin', respectively.
* @param config Additional configuration for triggering downloads.
* @returns An instance of `BrowserDownloads` `IOHandler`.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
function browserDownloads(fileNamePrefix = "model") {
return new browser_files_BrowserDownloads(fileNamePrefix);
}
/**
* Creates an IOHandler that loads model artifacts from user-selected files.
*
* This method can be used for loading from files such as user-selected files
* in the browser.
* When used in conjunction with `tf.loadLayersModel`, an instance of
* `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
*
* ```js
* // Note: This code snippet won't run properly without the actual file input
* // elements in the HTML DOM.
*
 * // Suppose there are two HTML file input (`<input type="file">`)
* // elements.
* const uploadJSONInput = document.getElementById('upload-json');
* const uploadWeightsInput = document.getElementById('upload-weights');
* const model = await tf.loadLayersModel(tf.io.browserFiles(
* [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));
* ```
*
* @param files `File`s to load from. Currently, this function supports only
* loading from files that contain Keras-style models (i.e., `tf.Model`s), for
* which an `Array` of `File`s is expected (in that order):
* - A JSON file containing the model topology and weight manifest.
 * - Optionally, one or more binary files containing the binary weights.
* These files must have names that match the paths in the `weightsManifest`
* contained by the aforementioned JSON file, or errors will be thrown
* during loading. These weights files have the same format as the ones
* generated by `tensorflowjs_converter` that comes with the `tensorflowjs`
* Python PIP package. If no weights files are provided, only the model
* topology will be loaded from the JSON file above.
* @returns An instance of `Files` `IOHandler`.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
function browserFiles(files) {
return new browser_files_BrowserFiles(files);
}
//# sourceMappingURL=browser_files.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/progress.js
/**
* @license
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Monitor the progress of a list of promises, firing the `onProgress`
 * callback as each promise resolves.
 *
 * @param promises The list of promises to monitor.
 * @param onProgress Callback function, fired with the overall completed
 * fraction each time a promise resolves.
 * @param startFraction Optional start of the fraction range. Defaults to 0.
 * @param endFraction Optional end of the fraction range. Defaults to 1.
*/
function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {
checkPromises(promises);
startFraction = startFraction == null ? 0 : startFraction;
endFraction = endFraction == null ? 1 : endFraction;
checkFraction(startFraction, endFraction);
let resolvedPromise = 0;
const registerMonitor = (promise) => {
promise.then((value) => {
const fraction =
startFraction + (++resolvedPromise / promises.length) * (endFraction - startFraction);
// pass fraction as parameter to callback function.
onProgress(fraction);
return value;
});
return promise;
};
function checkPromises(promises) {
Object(util["assert"])(
promises != null && Array.isArray(promises) && promises.length > 0,
() => "promises must be a none empty array"
);
}
function checkFraction(startFraction, endFraction) {
Object(util["assert"])(
startFraction >= 0 && startFraction <= 1,
() =>
`Progress fraction must be in range [0, 1], but ` +
`got startFraction ${startFraction}`
);
Object(util["assert"])(
endFraction >= 0 && endFraction <= 1,
() =>
`Progress fraction must be in range [0, 1], but ` + `got endFraction ${endFraction}`
);
Object(util["assert"])(
endFraction >= startFraction,
() =>
`startFraction must be no more than endFraction, but ` +
`got startFraction ${startFraction} and endFraction ` +
`${endFraction}`
);
}
return Promise.all(promises.map(registerMonitor));
}
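/**
 * A minimal sketch of how the progress monitor composes (the URLs and the
 * fraction range are illustrative):
 *
 * ```js
 * const requests = [fetch('/shard1.bin'), fetch('/shard2.bin')];
 * const responses = await monitorPromisesProgress(
 *   requests,
 *   (fraction) => console.log(`progress: ${(fraction * 100).toFixed(0)}%`),
 *   0,   // startFraction
 *   0.5  // endFraction: reserve the upper half for a later phase
 * );
 * ```
 */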
//# sourceMappingURL=progress.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/types.js
var types = __webpack_require__(34);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Reads binary weights data from a number of URLs.
*
 * @param fetchURLs URLs to send the HTTP requests to, using `fetch` calls.
 * @param loadOptions Optional load options: `requestInit` (RequestInit for
 * the HTTP requests), `fetchFunc` (an overriding value for the
 * `window.fetch` function) and `onProgress` (a progress callback function,
 * fired periodically before the load is completed).
* @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same
* length as `fetchURLs`.
*/
async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {
if (loadOptions == null) {
loadOptions = {};
}
const fetchFunc =
loadOptions.fetchFunc == null
? Object(environment["c" /* env */])().platform.fetch
: loadOptions.fetchFunc;
// Create the requests for all of the weights in parallel.
const requests = fetchURLs.map((fetchURL) =>
fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true })
);
const fetchStartFraction = 0;
const fetchEndFraction = 0.5;
const responses =
loadOptions.onProgress == null
? await Promise.all(requests)
: await monitorPromisesProgress(
requests,
loadOptions.onProgress,
fetchStartFraction,
fetchEndFraction
);
const bufferPromises = responses.map((response) => response.arrayBuffer());
const bufferStartFraction = 0.5;
const bufferEndFraction = 1;
const buffers =
loadOptions.onProgress == null
? await Promise.all(bufferPromises)
: await monitorPromisesProgress(
bufferPromises,
loadOptions.onProgress,
bufferStartFraction,
bufferEndFraction
);
return buffers;
}
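/**
 * A hedged usage sketch; the shard URL is a placeholder:
 *
 * ```js
 * const buffers = await loadWeightsAsArrayBuffer(
 *   ['https://example.com/model/group1-shard1of1.bin'],
 *   { onProgress: (fraction) => console.log(fraction) }
 * );
 * // buffers[0] is an ArrayBuffer holding the shard's raw bytes.
 * ```
 */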
/**
* Reads a weights manifest JSON configuration, fetches the weights and
* returns them as `Tensor`s.
*
* @param manifest The weights manifest JSON.
* @param filePathPrefix The path prefix for filenames given in the manifest.
* Defaults to the empty string.
* @param weightNames The names of the weights to be fetched.
*/
async function loadWeights(manifest, filePathPrefix = "", weightNames, requestInit) {
// TODO(nsthorat): Groups are currently fetched atomically. If you need a
// single weight from a group, the whole group will be fetched. At a future
// date, we should support fetching only the individual shards within a
// group that are needed to reconstruct the requested weight.
// TODO(cais): Use `decodeWeights` for implementation.
const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });
const loadWeights = weightsLoaderFactory(fetchWeights);
return loadWeights(manifest, filePathPrefix, weightNames);
}
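/**
 * For example (a sketch; the manifest URL, path prefix and weight name are
 * placeholders):
 *
 * ```js
 * const manifest = await (
 *   await fetch('https://example.com/model/weights_manifest.json')
 * ).json();
 * const weightMap = await loadWeights(
 *   manifest, 'https://example.com/model', ['dense/kernel']);
 * // weightMap['dense/kernel'] is a tf.Tensor decoded from the fetched bytes.
 * ```
 */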
/**
* Creates a function, which reads a weights manifest JSON configuration,
* fetches the weight files using the specified function and returns them as
* `Tensor`s.
*
* ```js
* // example for creating a nodejs weight loader, which reads the weight files
* // from disk using fs.readFileSync
*
* import * as fs from 'fs'
*
* const fetchWeightsFromDisk = (filePaths: string[]) =>
* filePaths.map(filePath => fs.readFileSync(filePath).buffer)
*
* const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)
*
* const manifest = JSON.parse(
* fs.readFileSync('./my_model-weights_manifest').toString()
* )
* const weightMap = await loadWeights(manifest, './')
* ```
* @param fetchWeightsFunction The function used for fetching the weight files.
* @returns Weight loading function.
*/
function weightsLoaderFactory(fetchWeightsFunction) {
return async (manifest, filePathPrefix = "", weightNames) => {
// Collect all the groups, weights, and their relative offsets to be
// fetched.
const groupIndicesToFetchMap = manifest.map(() => false);
const groupWeightsToFetch = {};
const weightsFound = weightNames != null ? weightNames.map(() => false) : [];
const allManifestWeightNames = [];
manifest.forEach((manifestGroupConfig, groupIndex) => {
let groupOffset = 0;
manifestGroupConfig.weights.forEach((weightsEntry) => {
const rawDtype =
"quantization" in weightsEntry
? weightsEntry.quantization.dtype
: weightsEntry.dtype;
const weightsBytes =
types["a" /* DTYPE_VALUE_SIZE_MAP */][rawDtype] *
util["sizeFromShape"](weightsEntry.shape);
const enqueueWeightsForFetchingFn = () => {
groupIndicesToFetchMap[groupIndex] = true;
if (groupWeightsToFetch[groupIndex] == null) {
groupWeightsToFetch[groupIndex] = [];
}
groupWeightsToFetch[groupIndex].push({
manifestEntry: weightsEntry,
groupOffset,
sizeBytes: weightsBytes,
});
};
if (weightNames != null) {
weightNames.forEach((weightName, weightIndex) => {
if (weightName === weightsEntry.name) {
enqueueWeightsForFetchingFn();
weightsFound[weightIndex] = true;
}
});
} else {
enqueueWeightsForFetchingFn();
}
allManifestWeightNames.push(weightsEntry.name);
groupOffset += weightsBytes;
});
});
if (!weightsFound.every((found) => found)) {
const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);
throw new Error(
`Could not find weights in manifest with names: ` +
`${weightsNotFound.join(", ")}. \n` +
`Manifest JSON has weights with names: ` +
`${allManifestWeightNames.join(", ")}.`
);
}
// Convert the one-hot boolean groupId => shouldFetch map to a list of group
// IDs.
const groupIndicesToFetch = groupIndicesToFetchMap.reduce(
(accumulator, shouldFetch, i) => {
if (shouldFetch) {
accumulator.push(i);
}
return accumulator;
},
[]
);
const fetchUrls = [];
groupIndicesToFetch.forEach((i) => {
manifest[i].paths.forEach((filepath) => {
const fetchUrl =
filePathPrefix + (!filePathPrefix.endsWith("/") ? "/" : "") + filepath;
fetchUrls.push(fetchUrl);
});
});
const buffers = await fetchWeightsFunction(fetchUrls);
const weightsTensorMap = {};
let bufferIndexOffset = 0;
groupIndicesToFetch.forEach((i) => {
const numBuffers = manifest[i].paths.length;
      let groupBytes = 0;
      // Use `j` for the shard index so it does not shadow the group index `i`
      // from the enclosing forEach.
      for (let j = 0; j < numBuffers; j++) {
        groupBytes += buffers[bufferIndexOffset + j].byteLength;
      }
      // Create a buffer for the whole group.
      const groupBuffer = new ArrayBuffer(groupBytes);
      const groupByteBuffer = new Uint8Array(groupBuffer);
      let groupBufferOffset = 0;
      for (let j = 0; j < numBuffers; j++) {
        const buffer = new Uint8Array(buffers[bufferIndexOffset + j]);
        groupByteBuffer.set(buffer, groupBufferOffset);
        groupBufferOffset += buffer.byteLength;
      }
const weightsEntries = groupWeightsToFetch[i];
weightsEntries.forEach((weightsEntry) => {
const byteBuffer = groupBuffer.slice(
weightsEntry.groupOffset,
weightsEntry.groupOffset + weightsEntry.sizeBytes
);
const nameToTensorMap = Object(io_utils["e" /* decodeWeights */])(byteBuffer, [
weightsEntry.manifestEntry,
]);
for (const name in nameToTensorMap) {
weightsTensorMap[name] = nameToTensorMap[name];
}
});
bufferIndexOffset += numBuffers;
});
return weightsTensorMap;
};
}
//# sourceMappingURL=weights_loader.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/http.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* IOHandler implementations based on HTTP requests in the web browser.
*
* Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).
*/
const OCTET_STREAM_MIME_TYPE = "application/octet-stream";
const JSON_TYPE = "application/json";
class http_HTTPRequest {
constructor(path, loadOptions) {
this.DEFAULT_METHOD = "POST";
if (loadOptions == null) {
loadOptions = {};
}
this.weightPathPrefix = loadOptions.weightPathPrefix;
this.onProgress = loadOptions.onProgress;
if (loadOptions.fetchFunc != null) {
Object(util["assert"])(
typeof loadOptions.fetchFunc === "function",
() =>
"Must pass a function that matches the signature of " +
"`fetch` (see " +
"https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"
);
this.fetch = loadOptions.fetchFunc;
} else {
this.fetch = Object(environment["c" /* env */])().platform.fetch;
}
Object(util["assert"])(
path != null && path.length > 0,
() => "URL path for http must not be null, undefined or " + "empty."
);
if (Array.isArray(path)) {
Object(util["assert"])(
path.length === 2,
() =>
"URL paths for http must have a length of 2, " +
`(actual length is ${path.length}).`
);
}
this.path = path;
if (loadOptions.requestInit != null && loadOptions.requestInit.body != null) {
throw new Error("requestInit is expected to have no pre-existing body, but has one.");
}
this.requestInit = loadOptions.requestInit || {};
}
async save(modelArtifacts) {
if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
throw new Error(
"BrowserHTTPRequest.save() does not support saving model topology " +
"in binary formats yet."
);
}
const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);
init.body = new FormData();
const weightsManifest = [
{
paths: ["./model.weights.bin"],
weights: modelArtifacts.weightSpecs,
},
];
const modelTopologyAndWeightManifest = {
modelTopology: modelArtifacts.modelTopology,
format: modelArtifacts.format,
generatedBy: modelArtifacts.generatedBy,
convertedBy: modelArtifacts.convertedBy,
userDefinedMetadata: modelArtifacts.userDefinedMetadata,
weightsManifest,
};
init.body.append(
"model.json",
new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }),
"model.json"
);
if (modelArtifacts.weightData != null) {
init.body.append(
"model.weights.bin",
new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }),
"model.weights.bin"
);
}
const response = await this.fetch(this.path, init);
if (response.ok) {
return {
modelArtifactsInfo: Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(
modelArtifacts
),
responses: [response],
};
} else {
throw new Error(
`BrowserHTTPRequest.save() failed due to HTTP response status ` +
`${response.status}.`
);
}
}
/**
* Load model artifacts via HTTP request(s).
*
* See the documentation for `tf.io.http` for details on the saved
* artifacts.
*
* @returns The loaded model artifacts (if loading succeeds).
*/
async load() {
const modelConfigRequest = await this.fetch(this.path, this.requestInit);
if (!modelConfigRequest.ok) {
throw new Error(
`Request to ${this.path} failed with status code ` +
`${modelConfigRequest.status}. Please verify this URL points to ` +
`the model JSON of the model to load.`
);
}
let modelConfig;
try {
modelConfig = await modelConfigRequest.json();
} catch (e) {
let message = `Failed to parse model JSON of response from ${this.path}.`;
// TODO(nsthorat): Remove this after some time when we're comfortable that
// .pb files are mostly gone.
if (this.path.endsWith(".pb")) {
message +=
" Your path contains a .pb file extension. " +
"Support for .pb models has been removed in TensorFlow.js 1.0 " +
"in favor of .json models. You can re-convert your Python " +
"TensorFlow model using the TensorFlow.js 1.0 conversion scripts " +
"or you can convert your .pb models with the 'pb2json' " +
"NPM script in the tensorflow/tfjs-converter repository.";
} else {
message +=
" Please make sure the server is serving valid " + "JSON for this request.";
}
throw new Error(message);
}
const modelTopology = modelConfig.modelTopology;
const weightsManifest = modelConfig.weightsManifest;
const generatedBy = modelConfig.generatedBy;
const convertedBy = modelConfig.convertedBy;
const format = modelConfig.format;
const userDefinedMetadata = modelConfig.userDefinedMetadata;
// We do not allow both modelTopology and weightsManifest to be missing.
if (modelTopology == null && weightsManifest == null) {
throw new Error(
`The JSON from HTTP path ${this.path} contains neither model ` +
`topology nor a manifest for weights.`
);
}
let weightSpecs;
let weightData;
if (weightsManifest != null) {
const results = await this.loadWeights(weightsManifest);
[weightSpecs, weightData] = results;
}
return {
modelTopology,
weightSpecs,
weightData,
userDefinedMetadata,
generatedBy,
convertedBy,
format,
};
}
async loadWeights(weightsManifest) {
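// When `path` is a two-element array [topologyUrl, weightsUrl], weight
// shards resolve relative to the second entry; `weightPathPrefix`, if set,
// overrides the derived prefix.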
const weightPath = Array.isArray(this.path) ? this.path[1] : this.path;
const [prefix, suffix] = parseUrl(weightPath);
const pathPrefix = this.weightPathPrefix || prefix;
const weightSpecs = [];
for (const entry of weightsManifest) {
weightSpecs.push(...entry.weights);
}
const fetchURLs = [];
weightsManifest.forEach((weightsGroup) => {
weightsGroup.paths.forEach((path) => {
fetchURLs.push(pathPrefix + path + suffix);
});
});
const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {
requestInit: this.requestInit,
fetchFunc: this.fetch,
onProgress: this.onProgress,
});
return [weightSpecs, Object(io_utils["d" /* concatenateArrayBuffers */])(buffers)];
}
}
http_HTTPRequest.URL_SCHEME_REGEX = /^https?:\/\//;
/**
* Extract the prefix and suffix of the url, where the prefix is the path up to
* and including the last slash, and the suffix is the query string after the file name.
* ```
* const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'
* [prefix, suffix] = parseUrl(url)
* // prefix = 'http://tfhub.dev/model/1/'
* // suffix = '?tfjs-format=file'
* ```
* @param url the model url to be parsed.
*/
function parseUrl(url) {
const lastSlash = url.lastIndexOf("/");
const lastSearchParam = url.lastIndexOf("?");
const prefix = url.substring(0, lastSlash);
const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : "";
return [prefix + "/", suffix];
}
function isHTTPScheme(url) {
return url.match(http_HTTPRequest.URL_SCHEME_REGEX) != null;
}
const httpRouter = (url, loadOptions) => {
if (
typeof fetch === "undefined" &&
(loadOptions == null || loadOptions.fetchFunc == null)
) {
// `http` uses `fetch` or `node-fetch`; to use it in an environment
// that is neither the browser nor Node.js, one has to set up a global
// fetch polyfill.
return null;
} else {
let isHTTP = true;
if (Array.isArray(url)) {
isHTTP = url.every((urlItem) => isHTTPScheme(urlItem));
} else {
isHTTP = isHTTPScheme(url);
}
if (isHTTP) {
return http(url, loadOptions);
}
}
return null;
};
IORouterRegistry.registerSaveRouter(httpRouter);
IORouterRegistry.registerLoadRouter(httpRouter);
/**
* Creates an IOHandler subtype that sends model artifacts to an HTTP server.
*
* An HTTP request of the `multipart/form-data` mime type will be sent to the
* `path` URL. The form data includes artifacts that represent the topology
* and/or weights of the model. In the case of Keras-style `tf.Model`, two
* blobs (files) exist in form-data:
* - A JSON file consisting of `modelTopology` and `weightsManifest`.
* - A binary weights file consisting of the concatenated weight values.
* These files are in the same format as the one generated by
* [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).
*
* The following code snippet exemplifies the client-side code that uses this
* function:
*
* ```js
* const model = tf.sequential();
* model.add(
* tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
*
* const saveResult = await model.save(tf.io.http(
* 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));
* console.log(saveResult);
* ```
*
* If the default `POST` method is to be used, without any custom parameters
* such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:
*
* ```js
* const saveResult = await model.save('http://model-server:5000/upload');
* ```
*
* The following GitHub Gist
* https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864
* implements a server based on [flask](https://github.com/pallets/flask) that
* can receive the request. Upon receiving the model artifacts via the request,
* this particular server reconstitutes instances of [Keras
* Models](https://keras.io/models/model/) in memory.
*
*
* @param path A URL path to the model.
* Can be an absolute HTTP path (e.g.,
* 'http://localhost:8000/model-upload') or a relative path (e.g.,
* './model-upload').
* @param requestInit Request configurations to be used when sending
* HTTP request to server using `fetch`. It can contain fields such as
* `method`, `credentials`, `headers`, `mode`, etc. See
* https://developer.mozilla.org/en-US/docs/Web/API/Request/Request
* for more information. `requestInit` must not have a body, because the
* body will be set by TensorFlow.js. File blobs representing the model
* topology (filename: 'model.json') and the weights of the model (filename:
* 'model.weights.bin') will be appended to the body. If `requestInit` has a
* `body`, an Error will be thrown.
* @param loadOptions Optional configuration for the loading. It includes the
* following fields:
* - weightPathPrefix Optional, this specifies the path prefix for weight
* files, by default this is calculated from the path param.
* - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,
* the `fetch` from node-fetch can be used here.
* - onProgress Optional, progress callback function, fired periodically
* before the load is completed.
* @returns An instance of `IOHandler`.
*/
/**
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
function http(path, loadOptions) {
return new http_HTTPRequest(path, loadOptions);
}
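/**
 * A minimal loading sketch (the URL is illustrative): the handler returned by
 * `http` can be passed to a model loader, with `loadOptions` supplying, for
 * example, a progress callback.
 *
 * ```js
 * const model = await tf.loadLayersModel(tf.io.http(
 *     'https://example.com/model.json',
 *     {onProgress: (fraction) => console.log(`loaded ${fraction * 100}%`)}));
 * ```
 */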
/**
* Deprecated. Use `tf.io.http`.
* @param path
* @param loadOptions
*/
function browserHTTPRequest(path, loadOptions) {
return http(path, loadOptions);
}
//# sourceMappingURL=http.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
class PassthroughLoader {
constructor(modelArtifacts) {
this.modelArtifacts = modelArtifacts;
}
async load() {
return this.modelArtifacts;
}
}
class PassthroughSaver {
constructor(saveHandler) {
this.saveHandler = saveHandler;
}
async save(modelArtifacts) {
return this.saveHandler(modelArtifacts);
}
}
/**
* Creates an IOHandler that loads model artifacts from memory.
*
* When used in conjunction with `tf.loadLayersModel`, an instance of
* `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
*
* ```js
* const model = await tf.loadLayersModel(tf.io.fromMemory(
*     {modelTopology, weightSpecs, weightData}));
* ```
*
* @param modelArtifacts an object containing model topology (i.e., parsed from
* the JSON format).
* @param weightSpecs An array of `WeightsManifestEntry` objects describing the
* names, shapes, types, and quantization of the weight data.
* @param weightData A single `ArrayBuffer` containing the weight data,
* concatenated in the order described by the weightSpecs.
* @param trainingConfig Model training configuration. Optional.
*
* @returns A passthrough `IOHandler` that simply loads the provided data.
*/
function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {
if (arguments.length === 1) {
const isModelArtifacts =
modelArtifacts.modelTopology != null || modelArtifacts.weightSpecs != null;
if (isModelArtifacts) {
return new PassthroughLoader(modelArtifacts);
} else {
// Legacy support: with only modelTopology.
// TODO(cais): Remove this deprecated API.
console.warn(
"Please call tf.io.fromMemory() with only one argument. " +
"The argument should be of type ModelArtifacts. " +
"The multi-argument signature of tf.io.fromMemory() has been " +
"deprecated and will be removed in a future release."
);
return new PassthroughLoader({ modelTopology: modelArtifacts });
}
} else {
// Legacy support.
// TODO(cais): Remove this deprecated API.
console.warn(
"Please call tf.io.fromMemory() with only one argument. " +
"The argument should be of type ModelArtifacts. " +
"The multi-argument signature of tf.io.fromMemory() has been " +
"deprecated and will be removed in a future release."
);
return new PassthroughLoader({
modelTopology: modelArtifacts,
weightSpecs,
weightData,
trainingConfig,
});
}
}
/**
* Creates an IOHandler that passes saved model artifacts to a callback.
*
* ```js
* function handleSave(artifacts) {
* // ... do something with the artifacts ...
* return {modelArtifactsInfo: {...}, ...};
* }
*
* const saveResult = model.save(tf.io.withSaveHandler(handleSave));
* ```
*
* @param saveHandler A function that accepts a `ModelArtifacts` and returns a
* `SaveResult`.
*/
function withSaveHandler(saveHandler) {
return new PassthroughSaver(saveHandler);
}
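/**
 * A minimal handler sketch (illustrative only): the save handler receives the
 * full `ModelArtifacts` and must return a `SaveResult` whose
 * `modelArtifactsInfo` summarizes what was saved. Here the artifacts are
 * simply captured in a local variable.
 *
 * ```js
 * let captured = null;
 * const saveResult = await model.save(tf.io.withSaveHandler(async (artifacts) => {
 *   captured = artifacts; // e.g., forward to a server or IndexedDB instead
 *   return {modelArtifactsInfo: {
 *     dateSaved: new Date(),
 *     modelTopologyType: 'JSON',
 *   }};
 * }));
 * ```
 */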
//# sourceMappingURL=passthrough.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/io.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Importing local_storage and indexed_db is necessary for the routers to be
// registered.
//# sourceMappingURL=io.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Creates a one-hot `tf.Tensor`. The locations represented by `indices` take
* value `onValue` (defaults to 1), while all other locations take value
* `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank
* `R+1` with the last axis of size `depth`.
*
* ```js
* tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();
* ```
*
* @param indices `tf.Tensor` of indices with dtype `int32`.
* @param depth The depth of the one hot dimension.
* @param onValue A number used to fill in the output when the index matches
* the location.
* @param offValue A number used to fill in the output when the index does
* not match the location.
*/
/** @doc {heading: 'Tensors', subheading: 'Creation'} */
function oneHot_(indices, depth, onValue = 1, offValue = 0) {
if (depth < 2) {
throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);
}
let $indices = Object(tensor_util_env["a" /* convertToTensor */])(
indices,
"indices",
"oneHot",
"int32"
);
const outShape = [...$indices.shape, depth];
$indices = $indices.flatten();
const forward = (backend, save) => {
save([$indices]);
return reshape(backend.oneHot($indices, depth, onValue, offValue), outShape);
};
const inputs = { indices: $indices };
const attrs = { depth, onValue, offValue };
return engine["a" /* ENGINE */].runKernelFunc(
forward,
inputs,
null /* grad */,
kernel_names["cb" /* OneHot */],
attrs
);
}
const oneHot = Object(operation["a" /* op */])({ oneHot_ });
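/**
 * A small worked example of the optional fill values (expected output shown
 * as comments): rank-1 indices with depth 3 yield a rank-2 tensor.
 *
 * ```js
 * tf.oneHot(tf.tensor1d([0, 2], 'int32'), 3, 5, -1).print();
 * // [[5, -1, -1],
 * //  [-1, -1, 5]]
 * ```
 */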
//# sourceMappingURL=one_hot.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the confusion matrix from true labels and predicted labels.
*
* ```js
* const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
* const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');
* const numClasses = 3;
* const out = tf.math.confusionMatrix(labels, predictions, numClasses);
* out.print();
* // Expected output matrix:
* // [[2, 0, 0],
* // [0, 1, 1],
* // [0, 0, 1]]
* ```
*
* @param labels The target labels, assumed to be 0-based integers
* for the classes. The shape is `[numExamples]`, where
* `numExamples` is the number of examples included.
* @param predictions The predicted classes, assumed to be
* 0-based integers for the classes. Must have the same shape as `labels`.
* @param numClasses Number of all classes, as an integer.
* Its value must be larger than the largest element in `labels` and
* `predictions`.
* @returns The confusion matrix as an int32-type 2D tensor. The value at
* row `r` and column `c` is the number of times examples of actual class
* `r` were predicted as class `c`.
*/
/** @doc {heading: 'Operations', subheading: 'Evaluation'} */
function confusionMatrix_(labels, predictions, numClasses) {
const $labels = Object(tensor_util_env["a" /* convertToTensor */])(
labels,
"labels",
"confusionMatrix"
);
const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(
predictions,
"predictions",
"confusionMatrix"
);
util["assert"](
numClasses == null || (numClasses > 0 && Number.isInteger(numClasses)),
() => `If provided, numClasses must be a positive integer, ` + `but got ${numClasses}`
);
util["assert"](
$labels.rank === 1,
() => `Expected the rank of labels to be 1, but got ${$labels.rank}`
);
util["assert"](
$predictions.rank === 1,
() => `Expected the rank of predictions to be 1, ` + `but got ${$predictions.rank}`
);
util["assert"](
$labels.shape[0] === $predictions.shape[0],
() =>
`Mismatch in the number of examples: ` +
`${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +
`Labels and predictions should have the same number of elements.`
);
util["assert"](
numClasses > 0 && Number.isInteger(numClasses),
() => `numClasses is required to be a positive integer, but got ` + `${numClasses}`
);
// TODO(cais): In the future, if oneHot supports tensors inputs for
// `numClasses`, `confusionMatrix` can make `numClasses` optional.
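// Entry (r, c) of oneHotLabels^T x oneHotPredictions sums, over all
// examples, the product of the "label is r" and "prediction is c"
// indicators, i.e., the number of examples of class r predicted as class c.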
const oneHotLabels = oneHot($labels.asType("int32"), numClasses);
const oneHotPredictions = oneHot($predictions.asType("int32"), numClasses);
const oneHotLabelsT = oneHotLabels.transpose();
return oneHotLabelsT.matMul(oneHotPredictions).asType("int32");
}
const confusionMatrix = Object(operation["a" /* op */])({ confusionMatrix_ });
//# sourceMappingURL=confusion_matrix.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/math.js
/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Exports under the tf.math.* namespace.
*/
//# sourceMappingURL=math.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/browser.js
/**
* @license
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
let fromPixels2DContext;
/**
* Creates a `tf.Tensor` from an image.
*
* ```js
* const image = new ImageData(1, 1);
* image.data[0] = 100;
* image.data[1] = 150;
* image.data[2] = 200;
* image.data[3] = 255;
*
* tf.browser.fromPixels(image).print();
* ```
*
* @param pixels The input image to construct the tensor from. The
* supported image types are all 4-channel. You can also pass in an image
* object with the following attributes:
* `{data: Uint8Array; width: number; height: number}`
* @param numChannels The number of channels of the output tensor. A
* numChannels value less than 4 allows you to ignore channels. Defaults to
* 3 (ignores alpha channel of input image).
*/
/** @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true} */
function fromPixels_(pixels, numChannels = 3) {
// Sanity checks.
if (numChannels > 4) {
throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");
}
if (pixels == null) {
throw new Error("pixels passed to tf.browser.fromPixels() can not be null");
}
let isPixelData = false;
let isImageData = false;
let isVideo = false;
let isImage = false;
let isCanvasLike = false;
if (pixels.data instanceof Uint8Array) {
isPixelData = true;
} else if (typeof ImageData !== "undefined" && pixels instanceof ImageData) {
isImageData = true;
} else if (typeof HTMLVideoElement !== "undefined" && pixels instanceof HTMLVideoElement) {
isVideo = true;
} else if (typeof HTMLImageElement !== "undefined" && pixels instanceof HTMLImageElement) {
isImage = true;
// tslint:disable-next-line: no-any
} else if (pixels.getContext != null) {
isCanvasLike = true;
} else {
throw new Error(
"pixels passed to tf.browser.fromPixels() must be either an " +
`HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +
`in browser, or OffscreenCanvas, ImageData in webworker` +
` or {data: Uint32Array, width: number, height: number}, ` +
`but was ${pixels.constructor.name}`
);
}
if (isVideo) {
const HAVE_CURRENT_DATA_READY_STATE = 2;
if (pixels.readyState < HAVE_CURRENT_DATA_READY_STATE) {
throw new Error(
"The video element has not loaded data yet. Please wait for " +
"`loadeddata` event on the