repo_id
stringlengths 18
103
| file_path
stringlengths 30
136
| content
stringlengths 2
3.36M
| __index_level_0__
int64 0
0
|
---|---|---|---|
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/kenlm_android-x86_64-cpu-opt.yml | build:
# Taskcluster CI job: builds KenLM for Android/x86_64 (CPU only) on top of the
# generic cached-artifact Linux template; the cache block tells the template
# where to publish/look up the built artifact so unchanged builds are skipped.
# NOTE(review): indentation was lost when this file was captured, so nesting
# below must be restored against the original .yml before reuse.
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.kenlm.android_x86_64_cpu.url}
artifact_namespace: ${system.kenlm.android_x86_64_cpu.namespace}
system_config:
>
${kenlm.packages_android.apt}
docker_image: "ubuntu:20.04"
scripts:
setup: "taskcluster/kenlm_tc-setup.sh --android-x86_64"
build: "taskcluster/kenlm_tc-build.sh --android-x86_64"
package: "taskcluster/kenlm_tc-package.sh"
workerType: "${docker.dsBuild}"
metadata:
name: "KenLM Android x86_64 CPU"
description: "Building KenLM for Android/x86_64, CPU only, optimized version"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstpush.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
#include <fst/weight.h>
// Command-line flags for the fstpush binary. Each DEFINE_* macro creates a
// global FLAGS_<name> variable plus its --help text; the flag names form the
// tool's CLI and must not change.
DEFINE_double(delta, fst::kDelta, "Comparison/quantization delta");
DEFINE_bool(push_weights, false, "Push weights");
DEFINE_bool(push_labels, false, "Push output labels");
DEFINE_bool(remove_total_weight, false,
"Remove total weight when pushing weights");
DEFINE_bool(remove_common_affix, false,
"Remove common prefix/suffix when pushing labels");
DEFINE_bool(to_final, false, "Push/reweight to final (vs. to initial) states");
// Implemented in a separate translation unit (presumably fstpush-main.cc —
// not visible here); this file only owns the flag definitions.
int fstpush_main(int argc, char **argv);
// Thin entry point: delegate straight to the real driver.
int main(int argc, char **argv) { return fstpush_main(argc, argv); }
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/compress/fstrandmod.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Generates a random FST according to a class-specific transition model.
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <memory>
#include <string>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/extensions/compress/randmod.h>
#include <fst/fstlib.h>
// Flags controlling the random FST model. The seed defaults to the current
// time, so consecutive runs differ unless --seed is passed explicitly; the
// count flags are upper bounds (the actual sizes are drawn in main()).
DEFINE_int32(seed, time(0), "Random seed");
DEFINE_int32(states, 10, "# of states");
DEFINE_int32(labels, 2, "# of labels");
DEFINE_int32(classes, 1, "# of probability distributions");
DEFINE_bool(transducer, false, "Output a transducer");
DEFINE_bool(weights, false, "Output a weighted FST");
int main(int argc, char **argv) {
using fst::StdVectorFst;
using fst::StdArc;
using fst::TropicalWeight;
using fst::WeightGenerate;
string usage = "Generates a random FST.\n\n Usage: ";
usage += argv[0];
usage += "[out.fst]\n";
std::set_new_handler(FailedNewHandler);
SET_FLAGS(usage.c_str(), &argc, &argv, true);
if (argc > 2) {
ShowUsage();
return 1;
}
string out_name = (argc > 1 && (strcmp(argv[1], "-") != 0)) ? argv[1] : "";
srand(FLAGS_seed);
int num_states = (rand() % FLAGS_states) + 1; // NOLINT
int num_classes = (rand() % FLAGS_classes) + 1; // NOLINT
int num_labels = (rand() % FLAGS_labels) + 1; // NOLINT
StdVectorFst fst;
using TropicalWeightGenerate = WeightGenerate<TropicalWeight>;
std::unique_ptr<TropicalWeightGenerate> generate(FLAGS_weights ?
new TropicalWeightGenerate(false) : nullptr);
fst::RandMod<StdArc, TropicalWeightGenerate> rand_mod(num_states,
num_classes, num_labels, FLAGS_transducer, generate.get());
rand_mod.Generate(&fst);
fst.Write(out_name);
return 0;
}
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/compile.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <istream>
#include <string>
#include <fst/script/compile.h>
#include <fst/script/fst-class.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
// Compiles a textual FST description read from istrm into a binary FST and
// writes it to the file named by dest. Convenience wrapper over
// CompileFstInternal; `source` is only used for error reporting per the
// inner args, and the boolean flags mirror the fstcompile CLI options
// (acceptor, keep input/output/state symbols, allow negative labels).
void CompileFst(std::istream &istrm, const string &source, const string &dest,
const string &fst_type, const string &arc_type,
const SymbolTable *isyms, const SymbolTable *osyms,
const SymbolTable *ssyms, bool accep, bool ikeep, bool okeep,
bool nkeep, bool allow_negative_labels) {
std::unique_ptr<FstClass> fst(
CompileFstInternal(istrm, source, fst_type, arc_type, isyms, osyms, ssyms,
accep, ikeep, okeep, nkeep, allow_negative_labels));
fst->Write(dest);
}
// Arc-type-dispatched compilation: packs the arguments, looks up the
// registered implementation for `arc_type`, and returns the compiled FST
// (ownership passes to the caller).
FstClass *CompileFstInternal(std::istream &istrm, const string &source,
const string &fst_type, const string &arc_type,
const SymbolTable *isyms, const SymbolTable *osyms,
const SymbolTable *ssyms, bool accep, bool ikeep,
bool okeep, bool nkeep,
bool allow_negative_labels) {
CompileFstInnerArgs iargs(istrm, source, fst_type, isyms, osyms, ssyms, accep,
ikeep, okeep, nkeep, allow_negative_labels);
CompileFstArgs args(iargs);
// Dispatch through the operation registry keyed on (name, arc type).
Apply<Operation<CompileFstArgs>>("CompileFstInternal", arc_type, &args);
return args.retval;
}
// This registers 2; 1 does not require registration.
REGISTER_FST_OPERATION(CompileFstInternal, StdArc, CompileFstArgs);
REGISTER_FST_OPERATION(CompileFstInternal, LogArc, CompileFstArgs);
REGISTER_FST_OPERATION(CompileFstInternal, Log64Arc, CompileFstArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/randequivalent.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Tests if two FSTS are equivalent by checking if random strings from one FST
// are transduced the same by both FSTs.
#ifndef FST_RANDEQUIVALENT_H_
#define FST_RANDEQUIVALENT_H_
#include <fst/log.h>
#include <fst/arcsort.h>
#include <fst/compose.h>
#include <fst/project.h>
#include <fst/randgen.h>
#include <fst/shortest-distance.h>
#include <fst/vector-fst.h>
namespace fst {
// Test if two FSTs are stochastically equivalent by randomly generating
// random paths through the FSTs.
//
// For each randomly generated path, the algorithm computes for each
// of the two FSTs the sum of the weights of all the successful paths
// sharing the same input and output labels as the considered randomly
// generated path and checks that these two values are within a user-specified
// delta. Returns optional error value (when FLAGS_error_fatal = false).
// Randomized equivalence test: for each of num_paths randomly generated
// paths, computes in each FST the total weight of all successful paths that
// share the random path's input/output labels, and checks the two totals
// agree within `delta`. Returns false on mismatch or on error (symbol-table
// incompatibility, or an FST in the kError state); `error`, when non-null,
// distinguishes the error case from a genuine inequivalence.
template <class Arc, class ArcSelector>
bool RandEquivalent(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
int32_t num_paths, float delta,
const RandGenOptions<ArcSelector> &opts,
bool *error = nullptr) {
using Weight = typename Arc::Weight;
if (error) *error = false;
// Checks that the symbol tables are compatible.
if (!CompatSymbols(fst1.InputSymbols(), fst2.InputSymbols()) ||
!CompatSymbols(fst1.OutputSymbols(), fst2.OutputSymbols())) {
FSTERROR() << "RandEquivalent: Input/output symbol tables of 1st "
<< "argument do not match input/output symbol tables of 2nd "
<< "argument";
if (error) *error = true;
return false;
}
static const ILabelCompare<Arc> icomp;
static const OLabelCompare<Arc> ocomp;
// Work on trimmed, input-label-sorted copies so composition below is valid.
VectorFst<Arc> sfst1(fst1);
VectorFst<Arc> sfst2(fst2);
Connect(&sfst1);
Connect(&sfst2);
ArcSort(&sfst1, icomp);
ArcSort(&sfst2, icomp);
bool result = true;
for (int32_t n = 0; n < num_paths; ++n) {
VectorFst<Arc> path;
// Alternate (randomly, via the global C RNG) which machine the sample
// path is drawn from, so paths present in only one FST can be found.
const auto &fst = rand() % 2 ? sfst1 : sfst2; // NOLINT
RandGen(fst, &path, opts);
VectorFst<Arc> ipath(path);
VectorFst<Arc> opath(path);
Project(&ipath, PROJECT_INPUT);
Project(&opath, PROJECT_OUTPUT);
// ipath o sfst1 o opath restricts sfst1 to paths with the sampled
// input/output labels; its shortest distance is their total weight.
VectorFst<Arc> cfst1, pfst1;
Compose(ipath, sfst1, &cfst1);
ArcSort(&cfst1, ocomp);
Compose(cfst1, opath, &pfst1);
// Gives up if there are epsilon cycles in a non-idempotent semiring.
if (!(Weight::Properties() & kIdempotent) &&
pfst1.Properties(kCyclic, true)) {
continue;
}
const auto sum1 = ShortestDistance(pfst1);
// Same restriction applied to the second machine.
VectorFst<Arc> cfst2;
Compose(ipath, sfst2, &cfst2);
ArcSort(&cfst2, ocomp);
VectorFst<Arc> pfst2;
Compose(cfst2, opath, &pfst2);
// Gives up if there are epsilon cycles in a non-idempotent semiring.
if (!(Weight::Properties() & kIdempotent) &&
pfst2.Properties(kCyclic, true)) {
continue;
}
const auto sum2 = ShortestDistance(pfst2);
if (!ApproxEqual(sum1, sum2, delta)) {
VLOG(1) << "Sum1 = " << sum1;
VLOG(1) << "Sum2 = " << sum2;
result = false;
break;
}
}
// Property bits may have been set during the work above; an error in either
// input invalidates the comparison.
if (fst1.Properties(kError, false) || fst2.Properties(kError, false)) {
if (error) *error = true;
return false;
}
return result;
}
// Tests if two FSTs are equivalent by randomly generating a nnum_paths paths
// (no longer than the path_length) using a user-specified seed, optionally
// indicating an error setting an optional error argument to true.
template <class Arc>
bool RandEquivalent(const Fst<Arc> &fst1, const Fst<Arc> &fst2, int32_t num_paths,
float delta = kDelta, time_t seed = time(nullptr),
int32_t max_length = std::numeric_limits<int32_t>::max(),
bool *error = nullptr) {
const UniformArcSelector<Arc> uniform_selector(seed);
const RandGenOptions<UniformArcSelector<Arc>> opts(uniform_selector,
max_length);
return RandEquivalent(fst1, fst2, num_paths, delta, opts, error);
}
} // namespace fst
#endif // FST_RANDEQUIVALENT_H_
| 0 |
coqui_public_repos/STT/native_client/kenlm | coqui_public_repos/STT/native_client/kenlm/util/file_piece_test.cc | // Tests might fail if you have creative characters in your path. Sue me.
#include "file_piece.hh"
#include "file_stream.hh"
#include "file.hh"
#include "scoped.hh"
#define BOOST_TEST_MODULE FilePieceTest
#include <boost/test/unit_test.hpp>
#include <fstream>
#include <iostream>
#include <cstdio>
#include <sys/types.h>
#include <sys/stat.h>
namespace util {
namespace {
// Returns the path of the text file the tests read: the first command-line
// argument when one was passed to the test binary, otherwise a default file
// expected to sit next to the test.
std::string FileLocation() {
  const auto &suite = boost::unit_test::framework::master_test_suite();
  if (suite.argc < 2) return "file_piece.cc";
  return std::string(suite.argv[1]);
}
/* istream */
// Reads the reference file twice in parallel — once with std::getline and
// once through FilePiece's istream-backed path — and requires every line to
// match. Reading past EOF must throw, and throw again on retry (sticky EOF).
BOOST_AUTO_TEST_CASE(IStream) {
std::fstream ref(FileLocation().c_str(), std::ios::in);
std::fstream backing(FileLocation().c_str(), std::ios::in);
FilePiece test(backing);
std::string ref_line;
while (getline(ref, ref_line)) {
StringPiece test_line(test.ReadLine());
BOOST_CHECK_EQUAL(ref_line, test_line);
}
BOOST_CHECK_THROW(test.get(), EndOfFileException);
BOOST_CHECK_THROW(test.get(), EndOfFileException);
}
/* mmap implementation */
// Same line-by-line comparison through FilePiece's mmap-backed constructor.
// The trailing 1 is presumably a minimal buffer size to force frequent
// refills — TODO confirm against the FilePiece constructor.
BOOST_AUTO_TEST_CASE(MMapReadLine) {
std::fstream ref(FileLocation().c_str(), std::ios::in);
FilePiece test(FileLocation().c_str(), NULL, 1);
std::string ref_line;
while (getline(ref, ref_line)) {
StringPiece test_line(test.ReadLine());
// I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924
if (!test_line.empty() || !ref_line.empty()) {
BOOST_CHECK_EQUAL(ref_line, test_line);
}
}
BOOST_CHECK_THROW(test.get(), EndOfFileException);
}
/* mmap with seek beforehand */
// As above, but both the reference stream and the FilePiece fd are seeked to
// offset 10 before reading, checking that FilePiece honors a pre-positioned
// descriptor rather than assuming offset 0.
BOOST_AUTO_TEST_CASE(MMapSeek) {
std::fstream ref(FileLocation().c_str(), std::ios::in);
ref.seekg(10);
scoped_fd file(util::OpenReadOrThrow(FileLocation().c_str()));
SeekOrThrow(file.get(), 10);
FilePiece test(file.release());
std::string ref_line;
while (getline(ref, ref_line)) {
StringPiece test_line(test.ReadLine());
// I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924
if (!test_line.empty() || !ref_line.empty()) {
BOOST_CHECK_EQUAL(ref_line, test_line);
}
}
BOOST_CHECK_THROW(test.get(), EndOfFileException);
}
#if !defined(_WIN32) && !defined(_WIN64) && !defined(__APPLE__)
/* Apple isn't happy with the popen, fileno, dup. And I don't want to
* reimplement popen. This is an issue with the test.
*/
/* read() implementation */
// Exercises the read()-based (non-seekable) path by piping the file through
// `cat` with popen; the fd is dup'd so pclose can still own the original.
BOOST_AUTO_TEST_CASE(StreamReadLine) {
std::fstream ref(FileLocation().c_str(), std::ios::in);
std::string popen_args = "cat \"";
popen_args += FileLocation();
popen_args += '"';
FILE *catter = popen(popen_args.c_str(), "r");
BOOST_REQUIRE(catter);
FilePiece test(dup(fileno(catter)), "file_piece.cc", NULL, 1);
std::string ref_line;
while (getline(ref, ref_line)) {
StringPiece test_line(test.ReadLine());
// I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924
if (!test_line.empty() || !ref_line.empty()) {
BOOST_CHECK_EQUAL(ref_line, test_line);
}
}
BOOST_CHECK_THROW(test.get(), EndOfFileException);
// pclose returning 0 also verifies cat exited cleanly.
BOOST_REQUIRE(!pclose(catter));
}
#endif
#ifdef HAVE_ZLIB
// gzip file
// gzip-compresses the reference file on disk, reads the .gz back through
// FilePiece's transparent decompression, and compares line by line. The
// temporary .gz is unlinked immediately after FilePiece opens it.
BOOST_AUTO_TEST_CASE(PlainZipReadLine) {
std::string location(FileLocation());
std::fstream ref(location.c_str(), std::ios::in);
std::string command("gzip <\"");
command += location + "\" >\"" + location + "\".gz";
BOOST_REQUIRE_EQUAL(0, system(command.c_str()));
FilePiece test((location + ".gz").c_str(), NULL, 1);
unlink((location + ".gz").c_str());
std::string ref_line;
while (getline(ref, ref_line)) {
StringPiece test_line(test.ReadLine());
// I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924
if (!test_line.empty() || !ref_line.empty()) {
BOOST_CHECK_EQUAL(ref_line, test_line);
}
}
BOOST_CHECK_THROW(test.get(), EndOfFileException);
}
// gzip stream. Apple doesn't like popen, fileno, dup. This is an issue with
// the test.
#if !defined __APPLE__ && !defined __MINGW32__
// Decompression over a non-seekable stream: gzip writes to a pipe and
// FilePiece must inflate while reading from the dup'd pipe fd.
BOOST_AUTO_TEST_CASE(StreamZipReadLine) {
std::fstream ref(FileLocation().c_str(), std::ios::in);
std::string command("gzip <\"");
command += FileLocation() + "\"";
FILE * catter = popen(command.c_str(), "r");
BOOST_REQUIRE(catter);
FilePiece test(dup(fileno(catter)), "file_piece.cc.gz", NULL, 1);
std::string ref_line;
while (getline(ref, ref_line)) {
StringPiece test_line(test.ReadLine());
// I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924
if (!test_line.empty() || !ref_line.empty()) {
BOOST_CHECK_EQUAL(ref_line, test_line);
}
}
BOOST_CHECK_THROW(test.get(), EndOfFileException);
BOOST_REQUIRE(!pclose(catter));
}
#endif // __APPLE__
#endif // HAVE_ZLIB
// Numeric parsing: writes "<huge integer> 3.2 5" to a temp file, then checks
// that ReadULong throws on the out-of-range integer (without consuming it),
// ReadDelimited returns it as text, ReadFloat reproduces the float exactly,
// and ReadULong parses the final 5.
BOOST_AUTO_TEST_CASE(Numbers) {
scoped_fd file(MakeTemp(FileLocation()));
const float floating = 3.2;
{
// Scoped so FileStream flushes before we seek back and re-read.
util::FileStream writing(file.get());
writing << "94389483984398493890287 " << floating << " 5";
}
SeekOrThrow(file.get(), 0);
util::FilePiece f(file.release());
BOOST_CHECK_THROW(f.ReadULong(), ParseNumberException);
BOOST_CHECK_EQUAL("94389483984398493890287", f.ReadDelimited());
// Yes, exactly equal. Isn't double-conversion wonderful?
BOOST_CHECK_EQUAL(floating, f.ReadFloat());
BOOST_CHECK_EQUAL(5, f.ReadULong());
}
} // namespace
} // namespace util
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/tc-cpp-ds-tests.sh | #!/bin/bash
# Echo commands and fail fast — standard CI hygiene.
set -xe
# Shared helpers: set_ldc_sample_filename, download_material, check_versions
# and the run_* test suites are defined here.
source $(dirname "$0")/tc-tests-utils.sh
# First CLI argument selects the LDC audio sample bitrate used by the tests.
bitrate=$1
set_ldc_sample_filename "${bitrate}"
# Fetch the built native client into the temp dir and put it on PATH so the
# C++ binaries under test resolve first.
download_material "${TASKCLUSTER_TMP_DIR}/ds"
export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH
check_versions
# C++ client inference test suites.
run_all_inference_tests
run_multi_inference_tests
run_cpp_only_inference_tests
run_hotword_tests
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/pair-weight.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Pair weight templated base class for weight classes that contain two weights
// (e.g. Product, Lexicographic).
#ifndef FST_PAIR_WEIGHT_H_
#define FST_PAIR_WEIGHT_H_
#include <climits>
#include <stack>
#include <string>
#include <utility>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/weight.h>
namespace fst {
// Pair weight: holds an ordered pair of component weights (W1, W2) and is the
// base representation for product and lexicographic weights. Every operation
// delegates memberwise to the component weight types.
template <class W1, class W2>
class PairWeight {
 public:
  using ReverseWeight =
      PairWeight<typename W1::ReverseWeight, typename W2::ReverseWeight>;

  PairWeight() {}

  // Copying and copy-assignment are exactly memberwise, so let the compiler
  // generate them instead of hand-writing the trivial bodies
  // (Rule of Zero / clang-tidy modernize-use-equals-default). Behavior is
  // identical to the previous user-written versions.
  PairWeight(const PairWeight &) = default;

  PairWeight &operator=(const PairWeight &) = default;

  PairWeight(W1 w1, W2 w2) : value1_(std::move(w1)), value2_(std::move(w2)) {}

  // Semiring constants, assembled from the components' constants.
  static const PairWeight<W1, W2> &Zero() {
    static const PairWeight zero(W1::Zero(), W2::Zero());
    return zero;
  }

  static const PairWeight<W1, W2> &One() {
    static const PairWeight one(W1::One(), W2::One());
    return one;
  }

  static const PairWeight<W1, W2> &NoWeight() {
    static const PairWeight no_weight(W1::NoWeight(), W2::NoWeight());
    return no_weight;
  }

  // Binary deserialization: reads value1 then value2 from strm.
  std::istream &Read(std::istream &strm) {
    value1_.Read(strm);
    return value2_.Read(strm);
  }

  // Binary serialization: writes value1 then value2 to strm.
  std::ostream &Write(std::ostream &strm) const {
    value1_.Write(strm);
    return value2_.Write(strm);
  }

  // A pair is a valid member of the semiring iff both components are.
  bool Member() const { return value1_.Member() && value2_.Member(); }

  // Mixes the first component's hash (5-bit rotate) before folding in the
  // second, so (a, b) and (b, a) usually hash differently.
  size_t Hash() const {
    const auto h1 = value1_.Hash();
    const auto h2 = value2_.Hash();
    static constexpr int lshift = 5;
    static constexpr int rshift = CHAR_BIT * sizeof(size_t) - 5;
    return h1 << lshift ^ h1 >> rshift ^ h2;
  }

  PairWeight<W1, W2> Quantize(float delta = kDelta) const {
    return PairWeight<W1, W2>(value1_.Quantize(delta), value2_.Quantize(delta));
  }

  ReverseWeight Reverse() const {
    return ReverseWeight(value1_.Reverse(), value2_.Reverse());
  }

  const W1 &Value1() const { return value1_; }

  const W2 &Value2() const { return value2_; }

  void SetValue1(const W1 &weight) { value1_ = weight; }

  void SetValue2(const W2 &weight) { value2_ = weight; }

 private:
  W1 value1_;
  W2 value2_;
};
// Equality is componentwise.
template <class W1, class W2>
inline bool operator==(const PairWeight<W1, W2> &w1,
const PairWeight<W1, W2> &w2) {
return w1.Value1() == w2.Value1() && w1.Value2() == w2.Value2();
}
template <class W1, class W2>
inline bool operator!=(const PairWeight<W1, W2> &w1,
const PairWeight<W1, W2> &w2) {
return w1.Value1() != w2.Value1() || w1.Value2() != w2.Value2();
}
// Approximate equality: both components must be within delta.
template <class W1, class W2>
inline bool ApproxEqual(const PairWeight<W1, W2> &w1,
const PairWeight<W1, W2> &w2, float delta = kDelta) {
return ApproxEqual(w1.Value1(), w2.Value1(), delta) &&
ApproxEqual(w1.Value2(), w2.Value2(), delta);
}
// Textual output in OpenFst's composite-weight format (delimited pair).
template <class W1, class W2>
inline std::ostream &operator<<(std::ostream &strm,
const PairWeight<W1, W2> &weight) {
CompositeWeightWriter writer(strm);
writer.WriteBegin();
writer.WriteElement(weight.Value1());
writer.WriteElement(weight.Value2());
writer.WriteEnd();
return strm;
}
// Textual input, inverse of operator<<; the `true` on the second element
// marks it as the last component of the composite.
template <class W1, class W2>
inline std::istream &operator>>(std::istream &strm,
PairWeight<W1, W2> &weight) {
CompositeWeightReader reader(strm);
reader.ReadBegin();
W1 w1;
reader.ReadElement(&w1);
weight.SetValue1(w1);
W2 w2;
reader.ReadElement(&w2, true);
weight.SetValue2(w2);
reader.ReadEnd();
return strm;
}
// This function object returns weights by calling the underlying generators
// and forming a pair. This is intended primarily for testing.
// Random-weight generator specialization for PairWeight: draws each component
// from its own generator and pairs the results. Intended for testing only.
template <class W1, class W2>
class WeightGenerate<PairWeight<W1, W2>> {
 public:
using Weight = PairWeight<W1, W2>;
using Generate1 = WeightGenerate<W1>;
using Generate2 = WeightGenerate<W2>;
// allow_zero is forwarded to both component generators.
explicit WeightGenerate(bool allow_zero = true)
: generate1_(allow_zero), generate2_(allow_zero) {}
Weight operator()() const { return Weight(generate1_(), generate2_()); }
 private:
Generate1 generate1_;
Generate2 generate2_;
};
} // namespace fst
#endif // FST_PAIR_WEIGHT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/far/farscript.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Convenience file for including all of the FAR operations, or registering
// them for new arc types.
#ifndef FST_EXTENSIONS_FAR_FARSCRIPT_H_
#define FST_EXTENSIONS_FAR_FARSCRIPT_H_
#include <string>
#include <vector>
#include <fst/types.h>
#include <fst/extensions/far/compile-strings.h>
#include <fst/extensions/far/create.h>
#include <fst/extensions/far/equal.h>
#include <fst/extensions/far/extract.h>
#include <fst/extensions/far/far.h>
#include <fst/extensions/far/far-class.h>
#include <fst/extensions/far/info.h>
#include <fst/extensions/far/isomorphic.h>
#include <fst/extensions/far/print-strings.h>
#include <fst/extensions/far/script-impl.h>
#include <fst/script/arg-packs.h>
namespace fst {
namespace script {
// Note: it is safe to pass these strings as references because this struct is
// only used to pass them deeper in the call graph. Be sure you understand why
// this is so before using this struct for anything else!
// Argument pack for the arc-type-dispatched FarCompileStrings operation.
// Holds const references only (see the note above about lifetime): the
// referenced objects must outlive the dispatch call.
struct FarCompileStringsArgs {
const std::vector<string> &in_fnames;
const string &out_fname;
const string &fst_type;
const FarType &far_type;
const int32_t generate_keys;
const FarEntryType fet;
const FarTokenType tt;
const string &symbols_fname;
const string &unknown_symbol;
const bool keep_symbols;
const bool initial_symbols;
const bool allow_negative_labels;
const string &key_prefix;
const string &key_suffix;
FarCompileStringsArgs(const std::vector<string> &in_fnames,
const string &out_fname, const string &fst_type,
const FarType &far_type, int32_t generate_keys,
FarEntryType fet, FarTokenType tt,
const string &symbols_fname,
const string &unknown_symbol, bool keep_symbols,
bool initial_symbols, bool allow_negative_labels,
const string &key_prefix, const string &key_suffix)
: in_fnames(in_fnames),
out_fname(out_fname),
fst_type(fst_type),
far_type(far_type),
generate_keys(generate_keys),
fet(fet),
tt(tt),
symbols_fname(symbols_fname),
unknown_symbol(unknown_symbol),
keep_symbols(keep_symbols),
initial_symbols(initial_symbols),
allow_negative_labels(allow_negative_labels),
key_prefix(key_prefix),
key_suffix(key_suffix) {}
};
// Templated shim invoked by the registry: unpacks the args and calls the
// typed fst::FarCompileStrings<Arc>.
template <class Arc>
void FarCompileStrings(FarCompileStringsArgs *args) {
FarCompileStrings<Arc>(
args->in_fnames, args->out_fname, args->fst_type, args->far_type,
args->generate_keys, args->fet, args->tt, args->symbols_fname,
args->unknown_symbol, args->keep_symbols, args->initial_symbols,
args->allow_negative_labels, args->key_prefix, args->key_suffix);
}
// Scripting entry point: dispatches on the runtime arc_type string.
void FarCompileStrings(const std::vector<string> &in_fnames,
const string &out_fname, const string &arc_type,
const string &fst_type, const FarType &far_type,
int32_t generate_keys, FarEntryType fet, FarTokenType tt,
const string &symbols_fname,
const string &unknown_symbol, bool keep_symbols,
bool initial_symbols, bool allow_negative_labels,
const string &key_prefix, const string &key_suffix);
// Note: it is safe to pass these strings as references because this struct is
// only used to pass them deeper in the call graph. Be sure you understand why
// this is so before using this struct for anything else!
// Argument pack for FarCreate; const references only, so the referenced
// objects must outlive the dispatch (same caveat as FarCompileStringsArgs).
struct FarCreateArgs {
const std::vector<string> &in_fnames;
const string &out_fname;
const int32_t generate_keys;
const FarType &far_type;
const string &key_prefix;
const string &key_suffix;
FarCreateArgs(const std::vector<string> &in_fnames, const string &out_fname,
const int32_t generate_keys, const FarType &far_type,
const string &key_prefix, const string &key_suffix)
: in_fnames(in_fnames),
out_fname(out_fname),
generate_keys(generate_keys),
far_type(far_type),
key_prefix(key_prefix),
key_suffix(key_suffix) {}
};
// Registry shim: unpack and forward to the typed fst::FarCreate<Arc>.
template <class Arc>
void FarCreate(FarCreateArgs *args) {
FarCreate<Arc>(args->in_fnames, args->out_fname, args->generate_keys,
args->far_type, args->key_prefix, args->key_suffix);
}
// Scripting entry point dispatching on the runtime arc_type string.
void FarCreate(const std::vector<string> &in_fnames, const string &out_fname,
const string &arc_type, const int32_t generate_keys,
const FarType &far_type, const string &key_prefix,
const string &key_suffix);
// FarEqual args: (file1, file2, delta, begin_key, end_key), with a bool
// result carried by WithReturnValue.
using FarEqualInnerArgs = std::tuple<const string &, const string &, float,
const string &, const string &>;
using FarEqualArgs = WithReturnValue<bool, FarEqualInnerArgs>;
template <class Arc>
void FarEqual(FarEqualArgs *args) {
args->retval = fst::FarEqual<Arc>(
std::get<0>(args->args), std::get<1>(args->args), std::get<2>(args->args),
std::get<3>(args->args), std::get<4>(args->args));
}
bool FarEqual(const string &filename1, const string &filename2,
const string &arc_type, float delta = kDelta,
const string &begin_key = string(),
const string &end_key = string());
// FarExtract args: (ifilenames, generate_filenames, keys, key_separator,
// range_delimiter, filename_prefix, filename_suffix) — see the declaration
// below for the matching parameter names.
using FarExtractArgs =
std::tuple<const std::vector<string> &, int32_t, const string &,
const string &, const string &, const string &, const string &>;
template <class Arc>
void FarExtract(FarExtractArgs *args) {
fst::FarExtract<Arc>(std::get<0>(*args), std::get<1>(*args),
std::get<2>(*args), std::get<3>(*args),
std::get<4>(*args), std::get<5>(*args),
std::get<6>(*args));
}
void FarExtract(const std::vector<string> &ifilenames, const string &arc_type,
int32_t generate_filenames, const string &keys,
const string &key_separator, const string &range_delimiter,
const string &filename_prefix, const string &filename_suffix);
// FarInfo args: (filenames, begin_key, end_key, list_fsts).
using FarInfoArgs = std::tuple<const std::vector<string> &, const string &,
const string &, const bool>;
template <class Arc>
void FarInfo(FarInfoArgs *args) {
fst::FarInfo<Arc>(std::get<0>(*args), std::get<1>(*args),
std::get<2>(*args), std::get<3>(*args));
}
void FarInfo(const std::vector<string> &filenames, const string &arc_type,
const string &begin_key, const string &end_key,
const bool list_fsts);
// GetFarInfo: like FarInfo but fills a FarInfoData out-parameter instead of
// printing.
using GetFarInfoArgs = std::tuple<const std::vector<string> &, const string &,
const string &, const bool, FarInfoData *>;
template <class Arc>
void GetFarInfo(GetFarInfoArgs *args) {
fst::GetFarInfo<Arc>(std::get<0>(*args), std::get<1>(*args),
std::get<2>(*args), std::get<3>(*args),
std::get<4>(*args));
}
void GetFarInfo(const std::vector<string> &filenames, const string &arc_type,
const string &begin_key, const string &end_key,
const bool list_fsts, FarInfoData *);
// FarIsomorphic mirrors FarEqual: same tuple shape, bool result.
using FarIsomorphicInnerArgs = std::tuple<const string &, const string &, float,
const string &, const string &>;
using FarIsomorphicArgs = WithReturnValue<bool, FarIsomorphicInnerArgs>;
template <class Arc>
void FarIsomorphic(FarIsomorphicArgs *args) {
args->retval = fst::FarIsomorphic<Arc>(
std::get<0>(args->args), std::get<1>(args->args), std::get<2>(args->args),
std::get<3>(args->args), std::get<4>(args->args));
}
bool FarIsomorphic(const string &filename1, const string &filename2,
const string &arc_type, float delta = kDelta,
const string &begin_key = string(),
const string &end_key = string());
// Argument pack for FarPrintStrings; const references only — referenced
// objects must outlive the dispatch call.
struct FarPrintStringsArgs {
const std::vector<string> &ifilenames;
const FarEntryType entry_type;
const FarTokenType token_type;
const string &begin_key;
const string &end_key;
const bool print_key;
const bool print_weight;
const string &symbols_fname;
const bool initial_symbols;
const int32_t generate_filenames;
const string &filename_prefix;
const string &filename_suffix;
FarPrintStringsArgs(const std::vector<string> &ifilenames,
const FarEntryType entry_type,
const FarTokenType token_type, const string &begin_key,
const string &end_key, const bool print_key,
const bool print_weight, const string &symbols_fname,
const bool initial_symbols,
const int32_t generate_filenames,
const string &filename_prefix,
const string &filename_suffix)
: ifilenames(ifilenames),
entry_type(entry_type),
token_type(token_type),
begin_key(begin_key),
end_key(end_key),
print_key(print_key),
print_weight(print_weight),
symbols_fname(symbols_fname),
initial_symbols(initial_symbols),
generate_filenames(generate_filenames),
filename_prefix(filename_prefix),
filename_suffix(filename_suffix) {}
};
// Registry shim: unpack and forward to the typed fst::FarPrintStrings<Arc>.
template <class Arc>
void FarPrintStrings(FarPrintStringsArgs *args) {
fst::FarPrintStrings<Arc>(
args->ifilenames, args->entry_type, args->token_type, args->begin_key,
args->end_key, args->print_key, args->print_weight, args->symbols_fname,
args->initial_symbols, args->generate_filenames, args->filename_prefix,
args->filename_suffix);
}
// Scripting entry point dispatching on the runtime arc_type string.
void FarPrintStrings(const std::vector<string> &ifilenames,
const string &arc_type, const FarEntryType entry_type,
const FarTokenType token_type, const string &begin_key,
const string &end_key, const bool print_key,
const bool print_weight, const string &symbols_fname,
const bool initial_symbols, const int32_t generate_filenames,
const string &filename_prefix,
const string &filename_suffix);
} // namespace script
} // namespace fst
#define REGISTER_FST_FAR_OPERATIONS(ArcType) \
REGISTER_FST_OPERATION(FarCompileStrings, ArcType, FarCompileStringsArgs); \
REGISTER_FST_OPERATION(FarCreate, ArcType, FarCreateArgs); \
REGISTER_FST_OPERATION(FarEqual, ArcType, FarEqualArgs); \
REGISTER_FST_OPERATION(FarExtract, ArcType, FarExtractArgs); \
REGISTER_FST_OPERATION(FarInfo, ArcType, FarInfoArgs); \
REGISTER_FST_OPERATION(FarIsomorphic, ArcType, FarIsomorphicArgs); \
REGISTER_FST_OPERATION(FarPrintStrings, ArcType, FarPrintStringsArgs); \
REGISTER_FST_OPERATION(GetFarInfo, ArcType, GetFarInfoArgs)
#endif // FST_EXTENSIONS_FAR_FARSCRIPT_H_
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/node-build.sh | #!/bin/bash
# Echo commands and fail fast — standard CI hygiene.
set -xe
# First CLI argument is forwarded verbatim to the npm packaging helper.
package_option=$1
# Shared helpers (do_deepspeech_npm_package) and TensorFlow build variables.
source $(dirname "$0")/tc-tests-utils.sh
source $(dirname "$0")/tf_tc-vars.sh
do_deepspeech_npm_package "${package_option}"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/bin/fsttopsort.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
// Implemented in a separate translation unit (presumably fsttopsort-main.cc —
// not visible here).
int fsttopsort_main(int argc, char **argv);
// Thin entry point: delegate straight to the real driver.
int main(int argc, char **argv) { return fsttopsort_main(argc, argv); }
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/framework/ortdevice.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <sstream>
// Struct to represent a physical device.
// Identifies a physical device as the triple (device type, memory type,
// device index). Plain value type: copyable, comparable via the free
// operators below.
struct OrtDevice {
  using DeviceType = int8_t;
  using MemoryType = int8_t;
  using DeviceId = int16_t;

  // Pre-defined device types.
  static const DeviceType CPU = 0;
  static const DeviceType GPU = 1;  // Nvidia or AMD
  static const DeviceType FPGA = 2;

  struct MemType {
    // Pre-defined memory types.
    static const MemoryType DEFAULT = 0;
    static const MemoryType CUDA_PINNED = 1;
    static const MemoryType HIP_PINNED = 2;
  };

  constexpr OrtDevice(DeviceType device_type_, MemoryType memory_type_, DeviceId device_id_)
      : type_(device_type_), memory_(memory_type_), id_(device_id_) {}

  // Default device: CPU with default memory, index 0.
  constexpr OrtDevice() : OrtDevice(CPU, MemType::DEFAULT, 0) {}

  DeviceType Type() const { return type_; }

  MemoryType MemType() const { return memory_; }

  DeviceId Id() const { return id_; }

  // Human-readable form, e.g. "Device:[DeviceType:0 MemoryType:0 DeviceId:0]".
  // The int8_t fields are widened so they print as numbers, not characters.
  std::string ToString() const {
    std::ostringstream out;
    out << "Device:[DeviceType:" << static_cast<int>(type_)
        << " MemoryType:" << static_cast<int>(memory_)
        << " DeviceId:" << id_ << "]";
    return out.str();
  }

 private:
  DeviceType type_;    // Device type.
  MemoryType memory_;  // Memory type.
  DeviceId id_;        // Device index.
};
// Two devices are equal iff all three identifying fields match.
inline bool operator==(const OrtDevice& left, const OrtDevice& other) {
  return left.Type() == other.Type() &&
         left.MemType() == other.MemType() &&
         left.Id() == other.Id();
}

// Defined as the negation of operator== to keep the two consistent.
inline bool operator!=(const OrtDevice& left, const OrtDevice& other) {
  return !(left == other);
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/special/Makefile.in | # Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
@HAVE_BIN_TRUE@bin_PROGRAMS = fstspecial$(EXEEXT)
subdir = src/extensions/special
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/depcomp
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libfstdir)" \
"$(DESTDIR)$(bindir)"
LTLIBRARIES = $(lib_LTLIBRARIES) $(libfst_LTLIBRARIES)
am__DEPENDENCIES_1 =
libfstspecial_la_DEPENDENCIES = ../../lib/libfst.la \
$(am__DEPENDENCIES_1)
am_libfstspecial_la_OBJECTS = phi-fst.lo rho-fst.lo sigma-fst.lo
libfstspecial_la_OBJECTS = $(am_libfstspecial_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
libfstspecial_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(libfstspecial_la_LDFLAGS) \
$(LDFLAGS) -o $@
phi_fst_la_DEPENDENCIES = ../../lib/libfst.la $(am__DEPENDENCIES_1)
am_phi_fst_la_OBJECTS = phi-fst.lo
phi_fst_la_OBJECTS = $(am_phi_fst_la_OBJECTS)
phi_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(phi_fst_la_LDFLAGS) $(LDFLAGS) -o $@
rho_fst_la_DEPENDENCIES = ../../lib/libfst.la $(am__DEPENDENCIES_1)
am_rho_fst_la_OBJECTS = rho-fst.lo
rho_fst_la_OBJECTS = $(am_rho_fst_la_OBJECTS)
rho_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(rho_fst_la_LDFLAGS) $(LDFLAGS) -o $@
sigma_fst_la_DEPENDENCIES = ../../lib/libfst.la $(am__DEPENDENCIES_1)
am_sigma_fst_la_OBJECTS = sigma-fst.lo
sigma_fst_la_OBJECTS = $(am_sigma_fst_la_OBJECTS)
sigma_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(sigma_fst_la_LDFLAGS) $(LDFLAGS) -o $@
PROGRAMS = $(bin_PROGRAMS)
am__fstspecial_SOURCES_DIST = ../../bin/fstconvert.cc \
../../bin/fstconvert-main.cc phi-fst.cc rho-fst.cc \
sigma-fst.cc
am__dirstamp = $(am__leading_dot)dirstamp
@HAVE_BIN_TRUE@am_fstspecial_OBJECTS = \
@HAVE_BIN_TRUE@ ../../bin/fstspecial-fstconvert.$(OBJEXT) \
@HAVE_BIN_TRUE@ ../../bin/fstspecial-fstconvert-main.$(OBJEXT) \
@HAVE_BIN_TRUE@ fstspecial-phi-fst.$(OBJEXT) \
@HAVE_BIN_TRUE@ fstspecial-rho-fst.$(OBJEXT) \
@HAVE_BIN_TRUE@ fstspecial-sigma-fst.$(OBJEXT)
fstspecial_OBJECTS = $(am_fstspecial_OBJECTS)
fstspecial_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstspecial_DEPENDENCIES = ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES =
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CXXFLAGS) $(CXXFLAGS)
AM_V_CXX = $(am__v_CXX_@AM_V@)
am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
am__v_CXX_0 = @echo " CXX " $@;
am__v_CXX_1 =
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
am__v_CXXLD_0 = @echo " CXXLD " $@;
am__v_CXXLD_1 =
SOURCES = $(libfstspecial_la_SOURCES) $(phi_fst_la_SOURCES) \
$(rho_fst_la_SOURCES) $(sigma_fst_la_SOURCES) \
$(fstspecial_SOURCES)
DIST_SOURCES = $(libfstspecial_la_SOURCES) $(phi_fst_la_SOURCES) \
$(rho_fst_la_SOURCES) $(sigma_fst_la_SOURCES) \
$(am__fstspecial_SOURCES_DIST)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS)
@HAVE_BIN_TRUE@LDADD = ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS)
@HAVE_BIN_TRUE@fstspecial_SOURCES = ../../bin/fstconvert.cc ../../bin/fstconvert-main.cc \
@HAVE_BIN_TRUE@ phi-fst.cc rho-fst.cc sigma-fst.cc
@HAVE_BIN_TRUE@fstspecial_CPPFLAGS = $(AM_CPPFLAGS)
libfst_LTLIBRARIES = phi-fst.la rho-fst.la sigma-fst.la
lib_LTLIBRARIES = libfstspecial.la
libfstspecial_la_SOURCES = phi-fst.cc rho-fst.cc sigma-fst.cc
libfstspecial_la_LDFLAGS = -version-info 10:0:0 -lm $(DL_LIBS)
libfstspecial_la_LIBADD = ../../lib/libfst.la -lm $(DL_LIBS)
phi_fst_la_SOURCES = phi-fst.cc
phi_fst_la_LDFLAGS = -module
phi_fst_la_LIBADD = ../../lib/libfst.la -lm $(DL_LIBS)
rho_fst_la_SOURCES = rho-fst.cc
rho_fst_la_LDFLAGS = -module
rho_fst_la_LIBADD = ../../lib/libfst.la -lm $(DL_LIBS)
sigma_fst_la_SOURCES = sigma-fst.cc
sigma_fst_la_LDFLAGS = -module
sigma_fst_la_LIBADD = ../../lib/libfst.la -lm $(DL_LIBS)
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/special/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/extensions/special/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
}
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
done
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
@list='$(lib_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
install-libfstLTLIBRARIES: $(libfst_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(libfst_LTLIBRARIES)'; test -n "$(libfstdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libfstdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libfstdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libfstdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libfstdir)"; \
}
uninstall-libfstLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(libfst_LTLIBRARIES)'; test -n "$(libfstdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libfstdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libfstdir)/$$f"; \
done
clean-libfstLTLIBRARIES:
-test -z "$(libfst_LTLIBRARIES)" || rm -f $(libfst_LTLIBRARIES)
@list='$(libfst_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
libfstspecial.la: $(libfstspecial_la_OBJECTS) $(libfstspecial_la_DEPENDENCIES) $(EXTRA_libfstspecial_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libfstspecial_la_LINK) -rpath $(libdir) $(libfstspecial_la_OBJECTS) $(libfstspecial_la_LIBADD) $(LIBS)
phi-fst.la: $(phi_fst_la_OBJECTS) $(phi_fst_la_DEPENDENCIES) $(EXTRA_phi_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(phi_fst_la_LINK) -rpath $(libfstdir) $(phi_fst_la_OBJECTS) $(phi_fst_la_LIBADD) $(LIBS)
rho-fst.la: $(rho_fst_la_OBJECTS) $(rho_fst_la_DEPENDENCIES) $(EXTRA_rho_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(rho_fst_la_LINK) -rpath $(libfstdir) $(rho_fst_la_OBJECTS) $(rho_fst_la_LIBADD) $(LIBS)
sigma-fst.la: $(sigma_fst_la_OBJECTS) $(sigma_fst_la_DEPENDENCIES) $(EXTRA_sigma_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(sigma_fst_la_LINK) -rpath $(libfstdir) $(sigma_fst_la_OBJECTS) $(sigma_fst_la_LIBADD) $(LIBS)
install-binPROGRAMS: $(bin_PROGRAMS)
@$(NORMAL_INSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
$(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
fi; \
for p in $$list; do echo "$$p $$p"; done | \
sed 's/$(EXEEXT)$$//' | \
while read p p1; do if test -f $$p \
|| test -f $$p1 \
; then echo "$$p"; echo "$$p"; else :; fi; \
done | \
sed -e 'p;s,.*/,,;n;h' \
-e 's|.*|.|' \
-e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
{ d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
if ($$2 == $$4) files[d] = files[d] " " $$1; \
else { print "f", $$3 "/" $$4, $$1; } } \
END { for (d in files) print "f", d, files[d] }' | \
while read type dir files; do \
if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
test -z "$$files" || { \
echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
$(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
} \
; done
uninstall-binPROGRAMS:
@$(NORMAL_UNINSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
-e 's/$$/$(EXEEXT)/' \
`; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(bindir)" && rm -f $$files
clean-binPROGRAMS:
@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
echo " rm -f" $$list; \
rm -f $$list || exit $$?; \
test -n "$(EXEEXT)" || exit 0; \
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
../../bin/$(am__dirstamp):
@$(MKDIR_P) ../../bin
@: > ../../bin/$(am__dirstamp)
../../bin/$(DEPDIR)/$(am__dirstamp):
@$(MKDIR_P) ../../bin/$(DEPDIR)
@: > ../../bin/$(DEPDIR)/$(am__dirstamp)
../../bin/fstspecial-fstconvert.$(OBJEXT): ../../bin/$(am__dirstamp) \
../../bin/$(DEPDIR)/$(am__dirstamp)
../../bin/fstspecial-fstconvert-main.$(OBJEXT): \
../../bin/$(am__dirstamp) ../../bin/$(DEPDIR)/$(am__dirstamp)
fstspecial$(EXEEXT): $(fstspecial_OBJECTS) $(fstspecial_DEPENDENCIES) $(EXTRA_fstspecial_DEPENDENCIES)
@rm -f fstspecial$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstspecial_OBJECTS) $(fstspecial_LDADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
-rm -f ../../bin/*.$(OBJEXT)
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@../../bin/$(DEPDIR)/fstspecial-fstconvert.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstspecial-phi-fst.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstspecial-rho-fst.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstspecial-sigma-fst.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/phi-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rho-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sigma-fst.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
.cc.obj:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.cc.lo:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<
../../bin/fstspecial-fstconvert.o: ../../bin/fstconvert.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ../../bin/fstspecial-fstconvert.o -MD -MP -MF ../../bin/$(DEPDIR)/fstspecial-fstconvert.Tpo -c -o ../../bin/fstspecial-fstconvert.o `test -f '../../bin/fstconvert.cc' || echo '$(srcdir)/'`../../bin/fstconvert.cc
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) ../../bin/$(DEPDIR)/fstspecial-fstconvert.Tpo ../../bin/$(DEPDIR)/fstspecial-fstconvert.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='../../bin/fstconvert.cc' object='../../bin/fstspecial-fstconvert.o' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ../../bin/fstspecial-fstconvert.o `test -f '../../bin/fstconvert.cc' || echo '$(srcdir)/'`../../bin/fstconvert.cc
../../bin/fstspecial-fstconvert.obj: ../../bin/fstconvert.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ../../bin/fstspecial-fstconvert.obj -MD -MP -MF ../../bin/$(DEPDIR)/fstspecial-fstconvert.Tpo -c -o ../../bin/fstspecial-fstconvert.obj `if test -f '../../bin/fstconvert.cc'; then $(CYGPATH_W) '../../bin/fstconvert.cc'; else $(CYGPATH_W) '$(srcdir)/../../bin/fstconvert.cc'; fi`
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) ../../bin/$(DEPDIR)/fstspecial-fstconvert.Tpo ../../bin/$(DEPDIR)/fstspecial-fstconvert.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='../../bin/fstconvert.cc' object='../../bin/fstspecial-fstconvert.obj' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ../../bin/fstspecial-fstconvert.obj `if test -f '../../bin/fstconvert.cc'; then $(CYGPATH_W) '../../bin/fstconvert.cc'; else $(CYGPATH_W) '$(srcdir)/../../bin/fstconvert.cc'; fi`
../../bin/fstspecial-fstconvert-main.o: ../../bin/fstconvert-main.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ../../bin/fstspecial-fstconvert-main.o -MD -MP -MF ../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Tpo -c -o ../../bin/fstspecial-fstconvert-main.o `test -f '../../bin/fstconvert-main.cc' || echo '$(srcdir)/'`../../bin/fstconvert-main.cc
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) ../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Tpo ../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='../../bin/fstconvert-main.cc' object='../../bin/fstspecial-fstconvert-main.o' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ../../bin/fstspecial-fstconvert-main.o `test -f '../../bin/fstconvert-main.cc' || echo '$(srcdir)/'`../../bin/fstconvert-main.cc
../../bin/fstspecial-fstconvert-main.obj: ../../bin/fstconvert-main.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ../../bin/fstspecial-fstconvert-main.obj -MD -MP -MF ../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Tpo -c -o ../../bin/fstspecial-fstconvert-main.obj `if test -f '../../bin/fstconvert-main.cc'; then $(CYGPATH_W) '../../bin/fstconvert-main.cc'; else $(CYGPATH_W) '$(srcdir)/../../bin/fstconvert-main.cc'; fi`
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) ../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Tpo ../../bin/$(DEPDIR)/fstspecial-fstconvert-main.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='../../bin/fstconvert-main.cc' object='../../bin/fstspecial-fstconvert-main.obj' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ../../bin/fstspecial-fstconvert-main.obj `if test -f '../../bin/fstconvert-main.cc'; then $(CYGPATH_W) '../../bin/fstconvert-main.cc'; else $(CYGPATH_W) '$(srcdir)/../../bin/fstconvert-main.cc'; fi`
fstspecial-phi-fst.o: phi-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fstspecial-phi-fst.o -MD -MP -MF $(DEPDIR)/fstspecial-phi-fst.Tpo -c -o fstspecial-phi-fst.o `test -f 'phi-fst.cc' || echo '$(srcdir)/'`phi-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/fstspecial-phi-fst.Tpo $(DEPDIR)/fstspecial-phi-fst.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='phi-fst.cc' object='fstspecial-phi-fst.o' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fstspecial-phi-fst.o `test -f 'phi-fst.cc' || echo '$(srcdir)/'`phi-fst.cc
fstspecial-phi-fst.obj: phi-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fstspecial-phi-fst.obj -MD -MP -MF $(DEPDIR)/fstspecial-phi-fst.Tpo -c -o fstspecial-phi-fst.obj `if test -f 'phi-fst.cc'; then $(CYGPATH_W) 'phi-fst.cc'; else $(CYGPATH_W) '$(srcdir)/phi-fst.cc'; fi`
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/fstspecial-phi-fst.Tpo $(DEPDIR)/fstspecial-phi-fst.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='phi-fst.cc' object='fstspecial-phi-fst.obj' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fstspecial-phi-fst.obj `if test -f 'phi-fst.cc'; then $(CYGPATH_W) 'phi-fst.cc'; else $(CYGPATH_W) '$(srcdir)/phi-fst.cc'; fi`
fstspecial-rho-fst.o: rho-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fstspecial-rho-fst.o -MD -MP -MF $(DEPDIR)/fstspecial-rho-fst.Tpo -c -o fstspecial-rho-fst.o `test -f 'rho-fst.cc' || echo '$(srcdir)/'`rho-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/fstspecial-rho-fst.Tpo $(DEPDIR)/fstspecial-rho-fst.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='rho-fst.cc' object='fstspecial-rho-fst.o' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fstspecial-rho-fst.o `test -f 'rho-fst.cc' || echo '$(srcdir)/'`rho-fst.cc
fstspecial-rho-fst.obj: rho-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fstspecial-rho-fst.obj -MD -MP -MF $(DEPDIR)/fstspecial-rho-fst.Tpo -c -o fstspecial-rho-fst.obj `if test -f 'rho-fst.cc'; then $(CYGPATH_W) 'rho-fst.cc'; else $(CYGPATH_W) '$(srcdir)/rho-fst.cc'; fi`
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/fstspecial-rho-fst.Tpo $(DEPDIR)/fstspecial-rho-fst.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='rho-fst.cc' object='fstspecial-rho-fst.obj' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fstspecial-rho-fst.obj `if test -f 'rho-fst.cc'; then $(CYGPATH_W) 'rho-fst.cc'; else $(CYGPATH_W) '$(srcdir)/rho-fst.cc'; fi`
fstspecial-sigma-fst.o: sigma-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fstspecial-sigma-fst.o -MD -MP -MF $(DEPDIR)/fstspecial-sigma-fst.Tpo -c -o fstspecial-sigma-fst.o `test -f 'sigma-fst.cc' || echo '$(srcdir)/'`sigma-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/fstspecial-sigma-fst.Tpo $(DEPDIR)/fstspecial-sigma-fst.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='sigma-fst.cc' object='fstspecial-sigma-fst.o' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fstspecial-sigma-fst.o `test -f 'sigma-fst.cc' || echo '$(srcdir)/'`sigma-fst.cc
fstspecial-sigma-fst.obj: sigma-fst.cc
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fstspecial-sigma-fst.obj -MD -MP -MF $(DEPDIR)/fstspecial-sigma-fst.Tpo -c -o fstspecial-sigma-fst.obj `if test -f 'sigma-fst.cc'; then $(CYGPATH_W) 'sigma-fst.cc'; else $(CYGPATH_W) '$(srcdir)/sigma-fst.cc'; fi`
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/fstspecial-sigma-fst.Tpo $(DEPDIR)/fstspecial-sigma-fst.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='sigma-fst.cc' object='fstspecial-sigma-fst.obj' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(fstspecial_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fstspecial-sigma-fst.obj `if test -f 'sigma-fst.cc'; then $(CYGPATH_W) 'sigma-fst.cc'; else $(CYGPATH_W) '$(srcdir)/sigma-fst.cc'; fi`
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
install-binPROGRAMS: install-libLTLIBRARIES
installdirs:
for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libfstdir)" "$(DESTDIR)$(bindir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-rm -f ../../bin/$(DEPDIR)/$(am__dirstamp)
-rm -f ../../bin/$(am__dirstamp)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libfstLTLIBRARIES clean-libtool mostlyclean-am
distclean: distclean-am
-rm -rf ../../bin/$(DEPDIR) ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am: install-libfstLTLIBRARIES
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am: install-binPROGRAMS install-libLTLIBRARIES
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -rf ../../bin/$(DEPDIR) ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES \
uninstall-libfstLTLIBRARIES
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libfstLTLIBRARIES clean-libtool cscopelist-am ctags \
ctags-am distclean distclean-compile distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-binPROGRAMS \
install-data install-data-am install-dvi install-dvi-am \
install-exec install-exec-am install-html install-html-am \
install-info install-info-am install-libLTLIBRARIES \
install-libfstLTLIBRARIES install-man install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \
uninstall-libLTLIBRARIES uninstall-libfstLTLIBRARIES
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| 0 |
coqui_public_repos | coqui_public_repos/STT/parse_valgrind_suppressions.sh | #! /usr/bin/awk -f
# A script to extract the actual suppression info from the output of (for example) valgrind --leak-check=full --show-reachable=yes --error-limit=no --gen-suppressions=all ./minimal
# The desired bits are between ^{ and ^} (including the braces themselves).
# The combined output should either be appended to /usr/lib/valgrind/default.supp, or placed in a .supp of its own
# If the latter, either tell valgrind about it each time with --suppressions=<filename>, or add that line to ~/.valgrindrc
# NB This script uses the |& operator, which I believe is gawk-specific. In case of failure, check that you're using gawk rather than some other awk
# The script looks for suppressions. When it finds one it stores it temporarily in an array,
# and also feeds it line by line to the external app 'md5sum' which generates a unique checksum for it.
# The checksum is used as an index in a different array. If an item with that index already exists the suppression must be a duplicate and is discarded.
BEGIN { suppression=0; md5sum = "md5sum" }
# If the line begins with '{', it's the start of a supression; so set the var and initialise things
/^{/ {
suppression=1; i=0; next
}
# If the line begins with '}' its the end of a suppression
/^}/ {
if (suppression)
{ suppression=0;
close(md5sum, "to") # We've finished sending data to md5sum, so close that part of the pipe
ProcessInput() # Do the slightly-complicated stuff in functions
delete supparray # We don't want subsequent suppressions to append to it!
}
}
# Otherwise, it's a normal line. If we're inside a supression, store it, and pipe it to md5sum. Otherwise it's cruft, so ignore it
{ if (suppression)
{
supparray[++i] = $0
print |& md5sum
}
}
function ProcessInput()
{
# Pipe the result from md5sum, then close it
md5sum |& getline result
close(md5sum)
# gawk can't cope with enormous ints like $result would be, so stringify it first by prefixing a definite string
resultstring = "prefix"result
if (! (resultstring in chksum_array) )
{ chksum_array[resultstring] = 0; # This checksum hasn't been seen before, so add it to the array
OutputSuppression() # and output the contents of the suppression
}
}
function OutputSuppression()
{
# A suppression is surrounded by '{' and '}'. Its data was stored line by line in the array
print "{"
for (n=1; n <= i; ++n)
{ print supparray[n] }
print "}"
}
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/common | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/common/logging/capture.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdarg>
#include <gsl/gsl>
#include "core/common/common.h"
#include "core/common/code_location.h"
#include "core/common/logging/severity.h"
namespace onnxruntime {
namespace logging {
class Logger;
enum class DataType;
/**
   Class to capture the details of a log message.
   Collects severity/category/location metadata plus a streamed message body;
   the out-of-line destructor presumably delivers the finished message to the
   owning Logger -- confirm in the corresponding .cc file.
*/
class Capture {
 public:
  /**
     Initializes a new instance of the Capture class.
     @param logger The logger.
     @param severity The severity.
     @param category The category.
     @param dataType Type of the data.
     @param location The file location the log message is coming from.
  */
  Capture(const Logger& logger, logging::Severity severity, const char* category,
          logging::DataType dataType, const CodeLocation& location)
      : logger_{&logger}, severity_{severity}, category_{category}, data_type_{dataType}, location_{location} {
  }

  /**
     The stream that can capture the message via operator<<.
     @returns Output stream.
  */
  std::ostream& Stream() noexcept {
    return stream_;
  }

#ifdef _MSC_VER
// add SAL annotation for printf format string. requires Code Analysis to run to validate usage.
#define msvc_printf_check _Printf_format_string_
#define __attribute__(x)  // Disable for MSVC. Supported by GCC and CLang.
#else
#define msvc_printf_check
#endif

  /**
     Captures a printf style log message.
     @param format The printf format.
     @param ... Arguments to the printf format if needed.
     @remarks
     A maximum of 2K of output will be captured currently.
     Non-static method, so 'this' is implicit first arg, and we use format(printf(2,3)
  */
  void CapturePrintf(msvc_printf_check const char* format, ...) __attribute__((format(printf, 2, 3)));

  /**
     Process a printf style log message.
     @param format The printf format.
     @param args Arguments to the printf format if needed.
     @remarks
     A maximum of 2K of output will be captured currently.
     Note: As va_list is 'char *', we have to disambiguate this from CapturePrintf
     so that something like "One string: %s", "the string" does not consider "the string"
     to be the va_list.
  */
  void ProcessPrintf(msvc_printf_check const char* format, va_list args);

  /// Severity of this message.
  logging::Severity Severity() const noexcept {
    return severity_;
  }

  /// Single-character prefix corresponding to the severity (e.g. for log line headers).
  char SeverityPrefix() const noexcept {
    // Carefully setup so severity_ is a valid index
    GSL_SUPPRESS(bounds .2) {
      return logging::SEVERITY_PREFIX[static_cast<int>(severity_)];
    }
  }

  /// Category string supplied at construction (not owned; must outlive the Capture).
  const char* Category() const noexcept {
    return category_;
  }

  /// Data type tag supplied at construction.
  logging::DataType DataType() const noexcept {
    return data_type_;
  }

  /// Source-code location the message originates from.
  const CodeLocation& Location() const noexcept {
    return location_;
  }

  /// The message accumulated so far (copies the stream contents).
  std::string Message() const noexcept {
    return stream_.str();
  }

  ~Capture();

 private:
  ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Capture);

  const Logger* logger_;
  const logging::Severity severity_;
  const char* category_;
  const logging::DataType data_type_;
  const CodeLocation location_;

  std::ostringstream stream_;
};
} // namespace logging
} // namespace onnxruntime
| 0 |
coqui_public_repos/STT/data | coqui_public_repos/STT/data/smoke_test/ldc93s1_opus.csv | wav_filename,wav_filesize,transcript
LDC93S1.opus,93638,she had your dark suit in greasy wash water all year
| 0 |
coqui_public_repos/STT/native_client/kenlm | coqui_public_repos/STT/native_client/kenlm/util/multi_intersection_test.cc | #include "multi_intersection.hh"
#define BOOST_TEST_MODULE MultiIntersectionTest
#include <boost/test/unit_test.hpp>
namespace util {
namespace {

// An intersection over a single empty set must produce no result.
BOOST_AUTO_TEST_CASE(Empty) {
  std::vector<boost::iterator_range<const unsigned int*> > sets;
  sets.push_back(boost::iterator_range<const unsigned int*>(
      static_cast<const unsigned int*>(NULL),
      static_cast<const unsigned int*>(NULL)));
  BOOST_CHECK(!FirstIntersection(sets));
}

// With exactly one non-empty set, the intersection is that set, so the first
// element returned is the set's first element.
BOOST_AUTO_TEST_CASE(Single) {
  std::vector<unsigned int> nums;
  nums.push_back(1);
  nums.push_back(4);
  nums.push_back(100);
  std::vector<boost::iterator_range<std::vector<unsigned int>::const_iterator> > sets;
  sets.push_back(nums);

  boost::optional<unsigned int> ret(FirstIntersection(sets));

  BOOST_REQUIRE(ret);
  BOOST_CHECK_EQUAL(static_cast<unsigned int>(1), *ret);
}

// Helper: wraps a fixed-size C array in an iterator_range view.
template <class T, unsigned int len> boost::iterator_range<const T*> RangeFromArray(const T (&arr)[len]) {
  return boost::iterator_range<const T*>(arr, arr + len);
}

// Three sets with no element in common: no intersection may be reported.
BOOST_AUTO_TEST_CASE(MultiNone) {
  unsigned int nums0[] = {1, 3, 4, 22};
  unsigned int nums1[] = {2, 5, 12};
  unsigned int nums2[] = {4, 17};

  std::vector<boost::iterator_range<const unsigned int*> > sets;
  sets.push_back(RangeFromArray(nums0));
  sets.push_back(RangeFromArray(nums1));
  sets.push_back(RangeFromArray(nums2));

  BOOST_CHECK(!FirstIntersection(sets));
}

// Three sets whose unique common element is 17.
BOOST_AUTO_TEST_CASE(MultiOne) {
  unsigned int nums0[] = {1, 3, 4, 17, 22};
  unsigned int nums1[] = {2, 5, 12, 17};
  unsigned int nums2[] = {4, 17};

  std::vector<boost::iterator_range<const unsigned int*> > sets;
  sets.push_back(RangeFromArray(nums0));
  sets.push_back(RangeFromArray(nums1));
  sets.push_back(RangeFromArray(nums2));

  boost::optional<unsigned int> ret(FirstIntersection(sets));

  BOOST_REQUIRE(ret);
  BOOST_CHECK_EQUAL(static_cast<unsigned int>(17), *ret);
}

} // namespace
} // namespace util
| 0 |
coqui_public_repos/STT-examples/android_mic_streaming/app/src/main/java/org | coqui_public_repos/STT-examples/android_mic_streaming/app/src/main/java/org/sttdemo/MainActivity.kt | package org.sttdemo
import android.Manifest
import android.content.pm.PackageManager
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import android.os.Build
import android.os.Bundle
import android.view.View
import androidx.appcompat.app.AppCompatActivity
import androidx.core.app.ActivityCompat
import kotlinx.android.synthetic.main.activity_main.*
import ai.coqui.libstt.STTModel
import java.io.File
import java.util.concurrent.atomic.AtomicBoolean
/**
 * Demo activity that streams microphone audio into a Coqui STT model and shows
 * live (intermediate) transcriptions. Recording/decoding runs on a background
 * thread; all UI updates are marshalled through [runOnUiThread].
 */
class MainActivity : AppCompatActivity() {
    private var model: STTModel? = null

    private var transcriptionThread: Thread? = null
    private var isRecording: AtomicBoolean = AtomicBoolean(false)

    private val TFLITE_MODEL_FILENAME = "model.tflite"
    private val SCORER_FILENAME = "huge-vocab.scorer"

    /** Requests RECORD_AUDIO at runtime (SDK >= 23; granted at install time before that). */
    private fun checkAudioPermission() {
        // Permission is automatically granted on SDK < 23 upon installation.
        if (Build.VERSION.SDK_INT >= 23) {
            val permission = Manifest.permission.RECORD_AUDIO
            if (checkSelfPermission(permission) != PackageManager.PERMISSION_GRANTED) {
                ActivityCompat.requestPermissions(this, arrayOf(permission), 3)
            }
        }
    }

    /**
     * Records from the microphone and feeds the audio into the STT stream until
     * [isRecording] is cleared, then publishes the final transcription.
     * Runs on [transcriptionThread]; must not be called on the UI thread.
     */
    private fun transcribe() {
        // We read from the recorder in chunks of 2048 shorts. With a model that expects its input
        // at 16000Hz, this corresponds to 2048/16000 = 0.128s or 128ms.
        val audioBufferSamples = 2048
        val audioData = ShortArray(audioBufferSamples)

        runOnUiThread { btnStartInference.text = "Stop Recording" }

        model?.let { model ->
            val streamContext = model.createStream()

            // AudioRecord's constructor takes the buffer size in BYTES (2 bytes per
            // 16-bit sample), and it must be at least the hardware minimum or
            // initialization can fail / reads can starve.
            val bufferSizeBytes = maxOf(
                audioBufferSamples * 2,
                AudioRecord.getMinBufferSize(
                    model.sampleRate(),
                    AudioFormat.CHANNEL_IN_MONO,
                    AudioFormat.ENCODING_PCM_16BIT
                )
            )
            val recorder = AudioRecord(
                MediaRecorder.AudioSource.VOICE_RECOGNITION,
                model.sampleRate(),
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                bufferSizeBytes
            )
            recorder.startRecording()

            while (isRecording.get()) {
                // read() may return fewer samples than requested, or a negative
                // error code; only feed the samples that were actually captured.
                val readCount = recorder.read(audioData, 0, audioBufferSamples)
                if (readCount > 0) {
                    model.feedAudioContent(streamContext, audioData, readCount)
                    val decoded = model.intermediateDecode(streamContext)
                    runOnUiThread { transcription.text = decoded }
                }
            }

            val decoded = model.finishStream(streamContext)

            runOnUiThread {
                btnStartInference.text = "Start Recording"
                transcription.text = decoded
            }

            recorder.stop()
            recorder.release()
        }
    }

    /**
     * Loads the TFLite model and external scorer from app-external storage.
     * @return true on success; false (with a status message) if storage is
     *         unavailable or a required file is missing.
     */
    private fun createModel(): Boolean {
        // getExternalFilesDir() is nullable (e.g. storage unmounted); fail
        // gracefully instead of building a bogus "null/..." path.
        val modelsPath = getExternalFilesDir(null)?.absolutePath
        if (modelsPath == null) {
            status.append("Model creation failed: external files dir is unavailable.\n")
            return false
        }
        val tfliteModelPath = "$modelsPath/$TFLITE_MODEL_FILENAME"
        val scorerPath = "$modelsPath/$SCORER_FILENAME"

        for (path in listOf(tfliteModelPath, scorerPath)) {
            if (!File(path).exists()) {
                status.append("Model creation failed: $path does not exist.\n")
                return false
            }
        }

        model = STTModel(tfliteModelPath)
        model?.enableExternalScorer(scorerPath)

        return true
    }

    /** Starts the background transcription thread if not already recording. */
    private fun startListening() {
        if (isRecording.compareAndSet(false, true)) {
            transcriptionThread = Thread(Runnable { transcribe() }, "Transcription Thread")
            transcriptionThread?.start()
        }
    }

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        checkAudioPermission()

        // Create application data directory on the device; fall back to the
        // internal files dir if external storage is unavailable.
        val modelsPath = (getExternalFilesDir(null) ?: filesDir).absolutePath

        status.text = "Ready. Copy model files to \"$modelsPath\" if running for the first time.\n"
    }

    /** Signals the transcription loop to finish; the thread drains and exits on its own. */
    private fun stopListening() {
        isRecording.set(false)
    }

    /** Toggle handler for the record button; lazily creates the model on first use. */
    fun onRecordClick(v: View?) {
        if (model == null) {
            if (!createModel()) {
                return
            }
            status.append("Created model.\n")
        }

        if (isRecording.get()) {
            stopListening()
        } else {
            startListening()
        }
    }

    override fun onDestroy() {
        super.onDestroy()
        model?.freeModel()
    }
}
| 0 |
coqui_public_repos/inference-engine/src | coqui_public_repos/inference-engine/src/ctcdecode/ctc_beam_search_decoder.h | #ifndef CTC_BEAM_SEARCH_DECODER_H_
#define CTC_BEAM_SEARCH_DECODER_H_
#include <memory>
#include <string>
#include <vector>
#include "scorer.h"
#include "output.h"
#include "alphabet.h"
// Holds the per-stream state of a CTC beam search decode. Configure with
// init(), feed timestep probabilities with next(), and read results with
// decode().
class DecoderState {
  // Decoding state (advanced as data is fed via next()).
  int abs_time_step_;
  int space_id_;
  int blank_id_;

  // Search hyperparameters, fixed at init() time.
  size_t beam_size_;
  double cutoff_prob_;
  size_t cutoff_top_n_;

  bool start_expanding_;

  std::shared_ptr<Scorer> ext_scorer_;  // Optional external LM scorer (may be null).
  std::vector<PathTrie*> prefixes_;     // Current set of search prefixes.
  std::unique_ptr<PathTrie> prefix_root_;
  TimestepTreeNode timestep_tree_root_{nullptr, 0};
  std::unordered_map<std::string, float> hot_words_;  // hot-word -> boost weight.

public:
  DecoderState() = default;
  ~DecoderState() = default;

  // Disallow copying
  DecoderState(const DecoderState&) = delete;
  DecoderState& operator=(DecoderState&) = delete;

  /* Initialize CTC beam search decoder
   *
   * Parameters:
   *     alphabet: The alphabet.
   *     beam_size: The width of beam search.
   *     cutoff_prob: Cutoff probability for pruning.
   *     cutoff_top_n: Cutoff number for pruning.
   *     ext_scorer: External scorer to evaluate a prefix, which consists of
   *                 n-gram language model scoring and word insertion term.
   *                 Default null, decoding the input sample without scorer.
   *     hot_words: Map from hot-word string to its boost value.
   * Return:
   *     Zero on success, non-zero on failure.
   */
  int init(const Alphabet& alphabet,
           size_t beam_size,
           double cutoff_prob,
           size_t cutoff_top_n,
           std::shared_ptr<Scorer> ext_scorer,
           std::unordered_map<std::string, float> hot_words);

  /* Send data to the decoder
   *
   * Parameters:
   *     probs: 2-D vector where each element is a vector of probabilities
   *            over alphabet of one time step.
   *     time_dim: Number of timesteps.
   *     class_dim: Number of classes (alphabet length + 1 for space character).
   */
  void next(const double *probs,
            int time_dim,
            int class_dim);

  /* Get up to num_results transcriptions from current decoder state.
   *
   * Parameters:
   *     num_results: Number of beams to return.
   *
   * Return:
   *     A vector where each element is a pair of score and decoding result,
   *     in descending order.
   */
  std::vector<Output> decode(size_t num_results=1) const;
};
/* CTC Beam Search Decoder
 * Parameters:
 *     probs: 2-D vector where each element is a vector of probabilities
 *            over alphabet of one time step.
 *     time_dim: Number of timesteps.
 *     class_dim: Alphabet length (plus 1 for space character).
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     cutoff_prob: Cutoff probability for pruning.
 *     cutoff_top_n: Cutoff number for pruning.
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     hot_words: A map of hot-words and their corresponding boosts
 *                The hot-word is a string and the boost is a float.
 *     num_results: Number of beams to return.
 * Return:
 *     A vector where each element is a pair of score and decoding result,
 *     in descending order.
*/
std::vector<Output> ctc_beam_search_decoder(
    const double* probs,
    int time_dim,
    int class_dim,
    const Alphabet &alphabet,
    size_t beam_size,
    double cutoff_prob,
    size_t cutoff_top_n,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results=1);

/* CTC Beam Search Decoder for batch data
 * Parameters:
 *     probs: 3-D vector where each element is a 2-D vector that can be used
 *            by ctc_beam_search_decoder().
 *     batch_size: Number of audio samples in the batch.
 *     seq_lengths: Per-sample number of valid timesteps, seq_lengths_size
 *                  entries (presumably each <= time_dim -- confirm in the
 *                  implementation).
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     num_processes: Number of threads for beam search.
 *     cutoff_prob: Cutoff probability for pruning.
 *     cutoff_top_n: Cutoff number for pruning.
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     hot_words: A map of hot-words and their corresponding boosts
 *                The hot-word is a string and the boost is a float.
 *     num_results: Number of beams to return.
 * Return:
 *     A 2-D vector where each element is a vector of beam search decoding
 *     result for one audio sample.
*/
std::vector<std::vector<Output>>
ctc_beam_search_decoder_batch(
    const double* probs,
    int batch_size,
    int time_dim,
    int class_dim,
    const int* seq_lengths,
    int seq_lengths_size,
    const Alphabet &alphabet,
    size_t beam_size,
    size_t num_processes,
    double cutoff_prob,
    size_t cutoff_top_n,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results=1);
#endif // CTC_BEAM_SEARCH_DECODER_H_
| 0 |
coqui_public_repos/STT-examples/electron | coqui_public_repos/STT-examples/electron/public/manifest.json | {
"short_name": "React App",
"name": "Create React App Sample",
"icons": [
{
"src": "favicon.ico",
"sizes": "64x64 32x32 24x24 16x16",
"type": "image/x-icon"
},
{
"src": "logo192.png",
"type": "image/png",
"sizes": "192x192"
},
{
"src": "logo512.png",
"type": "image/png",
"sizes": "512x512"
}
],
"start_url": ".",
"display": "standalone",
"theme_color": "#000000",
"background_color": "#ffffff"
}
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/pdt/shortest-path.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions to find shortest paths in a PDT.
#ifndef FST_EXTENSIONS_PDT_SHORTEST_PATH_H_
#define FST_EXTENSIONS_PDT_SHORTEST_PATH_H_
#include <stack>
#include <unordered_map>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/extensions/pdt/paren.h>
#include <fst/extensions/pdt/pdt.h>
#include <fst/shortest-path.h>
namespace fst {
// Options controlling PDT shortest-path search.
template <class Arc, class Queue>
struct PdtShortestPathOptions {
  bool keep_parentheses;  // Keep parentheses in the output path?
  bool path_gc;           // Garbage-collect shortest-path data?

  PdtShortestPathOptions(bool keep_parens = false, bool gc = true)
      : keep_parentheses(keep_parens), path_gc(gc) {}
};
namespace internal {
// Flags for shortest path data.
constexpr uint8 kPdtInited = 0x01;
constexpr uint8 kPdtFinal = 0x02;
constexpr uint8 kPdtMarked = 0x04;
// Stores shortest path tree info Distance(), Parent(), and ArcParent()
// information keyed on two types:
//
// 1. SearchState: This is a usual node in a shortest path tree but:
// a. is w.r.t a PDT search state (a pair of a PDT state and a "start" state,
// either the PDT start state or the destination state of an open
// parenthesis).
// b. the Distance() is from this "start" state to the search state.
// c. Parent().state is kNoLabel for the "start" state.
//
// 2. ParenSpec: This connects shortest path trees depending on the the
// parenthesis taken. Given the parenthesis spec:
// a. the Distance() is from the Parent() "start" state to the parenthesis
// destination state.
// b. The ArcParent() is the parenthesis arc.
// Stores the shortest-path tree (distances, parents, parent arcs) keyed on
// SearchState and ParenSpec (see the comment above). Instances have two
// phases: a build phase (accessors create entries on demand) and, after
// Finish(), a read-only phase (missing lookups return null data). The
// single-entry caches (state_/state_data_, paren_/paren_data_) make repeated
// lookups of the same key cheap.
template <class Arc>
class PdtShortestPathData {
 public:
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  struct SearchState {
    StateId state;  // PDT state.
    StateId start;  // PDT paren "start" state.

    SearchState(StateId s = kNoStateId, StateId t = kNoStateId)
        : state(s), start(t) {}

    bool operator==(const SearchState &other) const {
      if (&other == this) return true;
      return other.state == state && other.start == start;
    }
  };

  // Specifies paren ID, source and dest "start" states of a paren. These are
  // the "start" states of the respective sub-graphs.
  struct ParenSpec {
    ParenSpec(Label paren_id = kNoLabel, StateId src_start = kNoStateId,
              StateId dest_start = kNoStateId)
        : paren_id(paren_id), src_start(src_start), dest_start(dest_start) {}

    Label paren_id;
    StateId src_start;   // Sub-graph "start" state for paren source.
    StateId dest_start;  // Sub-graph "start" state for paren dest.

    bool operator==(const ParenSpec &other) const {
      if (&other == this) return true;
      // Bug fix: this previously compared other.src_start against itself
      // (always true), so specs differing only in src_start compared equal
      // and shared one paren_map_ entry.
      return (other.paren_id == paren_id &&
              other.src_start == src_start &&
              other.dest_start == dest_start);
    }
  };

  struct SearchData {
    SearchData()
        : distance(Weight::Zero()),
          parent(kNoStateId, kNoStateId),
          paren_id(kNoLabel),
          flags(0) {}

    Weight distance;     // Distance to this state from PDT "start" state.
    SearchState parent;  // Parent state in shortest path tree.
    int16 paren_id;      // If parent arc has paren, paren ID (or kNoLabel).
    uint8 flags;         // First byte reserved for PdtShortestPathData use.
  };

  // gc: whether GC() may reclaim inaccessible search data.
  PdtShortestPathData(bool gc)
      : gc_(gc), nstates_(0), ngc_(0), finished_(false) {}

  ~PdtShortestPathData() {
    VLOG(1) << "opm size: " << paren_map_.size();
    VLOG(1) << "# of search states: " << nstates_;
    if (gc_) VLOG(1) << "# of GC'd search states: " << ngc_;
  }

  // Resets all maps, caches, and counters to the freshly-constructed state.
  void Clear() {
    search_map_.clear();
    search_multimap_.clear();
    paren_map_.clear();
    state_ = SearchState(kNoStateId, kNoStateId);
    nstates_ = 0;
    ngc_ = 0;
  }

  // TODO(kbg): Currently copying SearchState and passing a const reference to
  // ParenSpec. Benchmark to confirm this is the right thing to do.

  Weight Distance(SearchState s) const { return GetSearchData(s)->distance; }

  Weight Distance(const ParenSpec &paren) const {
    return GetSearchData(paren)->distance;
  }

  SearchState Parent(SearchState s) const { return GetSearchData(s)->parent; }

  SearchState Parent(const ParenSpec &paren) const {
    return GetSearchData(paren)->parent;
  }

  Label ParenId(SearchState s) const { return GetSearchData(s)->paren_id; }

  uint8 Flags(SearchState s) const { return GetSearchData(s)->flags; }

  void SetDistance(SearchState s, Weight weight) {
    GetSearchData(s)->distance = std::move(weight);
  }

  void SetDistance(const ParenSpec &paren, Weight weight) {
    GetSearchData(paren)->distance = std::move(weight);
  }

  void SetParent(SearchState s, SearchState p) { GetSearchData(s)->parent = p; }

  void SetParent(const ParenSpec &paren, SearchState p) {
    GetSearchData(paren)->parent = p;
  }

  void SetParenId(SearchState s, Label p) {
    // paren_id is stored as int16 to keep SearchData small.
    if (p >= 32768) {
      FSTERROR() << "PdtShortestPathData: Paren ID does not fit in an int16";
    }
    GetSearchData(s)->paren_id = p;
  }

  // Replaces the bits of s's flags selected by mask with the bits of f.
  void SetFlags(SearchState s, uint8 f, uint8 mask) {
    auto *data = GetSearchData(s);
    data->flags &= ~mask;
    data->flags |= f & mask;
  }

  void GC(StateId s);

  // Switches to read-only mode: subsequent lookups no longer create entries.
  void Finish() { finished_ = true; }

 private:
  // Hash for search state.
  struct SearchStateHash {
    size_t operator()(const SearchState &s) const {
      static constexpr auto prime = 7853;
      return s.state + s.start * prime;
    }
  };

  // Hash for paren map.
  struct ParenHash {
    size_t operator()(const ParenSpec &paren) const {
      static constexpr auto prime0 = 7853;
      static constexpr auto prime1 = 7867;
      return paren.paren_id + paren.src_start * prime0 +
             paren.dest_start * prime1;
    }
  };

  using SearchMap =
      std::unordered_map<SearchState, SearchData, SearchStateHash>;

  using SearchMultimap = std::unordered_multimap<StateId, StateId>;

  // Hash map from paren spec to open paren data.
  using ParenMap = std::unordered_map<ParenSpec, SearchData, ParenHash>;

  // Returns (creating on demand unless finished_) the data for search state s,
  // maintaining the one-entry cache (state_/state_data_).
  SearchData *GetSearchData(SearchState s) const {
    if (s == state_) return state_data_;
    if (finished_) {
      auto it = search_map_.find(s);
      if (it == search_map_.end()) return &null_search_data_;
      state_ = s;
      return state_data_ = &(it->second);
    } else {
      state_ = s;
      state_data_ = &search_map_[s];
      if (!(state_data_->flags & kPdtInited)) {
        ++nstates_;
        if (gc_) search_multimap_.insert(std::make_pair(s.start, s.state));
        state_data_->flags = kPdtInited;
      }
      return state_data_;
    }
  }

  // Returns (creating on demand unless finished_) the data for a paren spec,
  // maintaining the one-entry cache (paren_/paren_data_).
  SearchData *GetSearchData(ParenSpec paren) const {
    if (paren == paren_) return paren_data_;
    if (finished_) {
      auto it = paren_map_.find(paren);
      if (it == paren_map_.end()) return &null_search_data_;
      paren_ = paren;
      // Bug fix: previously cached into state_data_, leaving paren_data_
      // stale while paren_ was updated, so the fast path above then returned
      // the wrong entry.
      return paren_data_ = &(it->second);
    } else {
      paren_ = paren;
      return paren_data_ = &paren_map_[paren];
    }
  }

  mutable SearchMap search_map_;            // Maps from search state to data.
  mutable SearchMultimap search_multimap_;  // Maps from "start" to subgraph.
  mutable ParenMap paren_map_;              // Maps paren spec to search data.
  mutable SearchState state_;               // Last state accessed.
  mutable SearchData *state_data_;          // Last state data accessed.
  mutable ParenSpec paren_;                 // Last paren spec accessed.
  mutable SearchData *paren_data_;          // Last paren data accessed.
  bool gc_;                                 // Allow GC?
  mutable size_t nstates_;                  // Total number of search states.
  size_t ngc_;                              // Number of GC'd search states.
  mutable SearchData null_search_data_;     // Null search data.
  bool finished_;                           // Read-only access when true.

  PdtShortestPathData(const PdtShortestPathData &) = delete;
  PdtShortestPathData &operator=(const PdtShortestPathData &) = delete;
};
// Deletes inaccessible search data from a given "start" (open paren dest)
// state. Assumes "final" (close paren source or PDT final) states have
// been flagged kPdtFinal. Classic mark-and-sweep over the sub-graph's
// shortest-path tree entries.
template <class Arc>
void PdtShortestPathData<Arc>::GC(StateId start) {
  if (!gc_) return;
  std::vector<StateId> finals;
  // Collect the states of this sub-graph that were flagged final.
  for (auto it = search_multimap_.find(start);
       it != search_multimap_.end() && it->first == start; ++it) {
    const SearchState s(it->second, start);
    if (search_map_[s].flags & kPdtFinal) finals.push_back(s.state);
  }
  // Mark phase: from each final state, walk parent links back toward the
  // "start" state, crossing into a sub-subgraph via paren_map_ when the
  // parent belongs to a different "start", flagging every visited state.
  for (const auto state : finals) {
    SearchState ss(state, start);
    while (ss.state != kNoLabel) {
      auto &sdata = search_map_[ss];
      if (sdata.flags & kPdtMarked) break;  // Already reached from another final.
      sdata.flags |= kPdtMarked;
      const auto p = sdata.parent;
      if (p.start != start && p.start != kNoLabel) {  // Entering sub-subgraph.
        const ParenSpec paren(sdata.paren_id, ss.start, p.start);
        ss = paren_map_[paren].parent;
      } else {
        ss = p;
      }
    }
  }
  // Sweep phase: erase unmarked search states of this sub-graph, and drop
  // the multimap entries for the whole sub-graph either way.
  auto it = search_multimap_.find(start);
  while (it != search_multimap_.end() && it->first == start) {
    const SearchState s(it->second, start);
    auto mit = search_map_.find(s);
    const SearchData &data = mit->second;
    if (!(data.flags & kPdtMarked)) {
      search_map_.erase(mit);
      ++ngc_;
    }
    search_multimap_.erase(it++);
  }
}
} // namespace internal
// This computes the single source shortest (balanced) path (SSSP) through a
// weighted PDT that has a bounded stack (i.e., is expandable as an FST). It is
// a generalization of the classic SSSP graph algorithm that removes a state s
// from a queue (defined by a user-provided queue type) and relaxes the
// destination states of transitions leaving s. In this PDT version, states that
// have entering open parentheses are treated as source states for a sub-graph
// SSSP problem with the shortest path up to the open parenthesis being first
// saved. When a close parenthesis is then encountered any balancing open
// parenthesis is examined for this saved information and multiplied back. In
// this way, each sub-graph is entered only once rather than repeatedly. If
// every state in the input PDT has the property that there is a unique "start"
// state for it with entering open parentheses, then this algorithm is quite
// straightforward. In general, this will not be the case, so the algorithm
// (implicitly) creates a new graph where each state is a pair of an original
// state and a possible parenthesis "start" state for that state.
template <class Arc, class Queue>
class PdtShortestPath {
 public:
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using SpData = internal::PdtShortestPathData<Arc>;
  using SearchState = typename SpData::SearchState;
  using ParenSpec = typename SpData::ParenSpec;
  using CloseSourceIterator =
      typename internal::PdtBalanceData<Arc>::SetIterator;

  // Builds the shortest-path computation for the PDT encoded by ifst with the
  // given open/close parenthesis label pairs. The weight must be a
  // right-distributive path semiring (e.g., tropical); otherwise the error
  // flag is set.
  PdtShortestPath(const Fst<Arc> &ifst,
                  const std::vector<std::pair<Label, Label>> &parens,
                  const PdtShortestPathOptions<Arc, Queue> &opts)
      : ifst_(ifst.Copy()),
        parens_(parens),
        keep_parens_(opts.keep_parentheses),
        start_(ifst.Start()),
        sp_data_(opts.path_gc),
        error_(false) {
    // TODO(kbg): Make this a compile-time static_assert once:
    // 1) All weight properties are made constexpr for all weight types.
    // 2) We have a pleasant way to "deregister" this operation for non-path
    //    semirings so an informative error message is produced. The best
    //    solution will probably involve some kind of SFINAE magic.
    if ((Weight::Properties() & (kPath | kRightSemiring)) !=
        (kPath | kRightSemiring)) {
      FSTERROR() << "PdtShortestPath: Weight needs to have the path"
                 << " property and be right distributive: " << Weight::Type();
      error_ = true;
    }
    // Both the open and the close label of a pair map to the same paren ID.
    for (Label i = 0; i < parens.size(); ++i) {
      const auto &pair = parens[i];
      paren_map_[pair.first] = i;
      paren_map_[pair.second] = i;
    }
  }

  ~PdtShortestPath() {
    VLOG(1) << "# of input states: " << CountStates(*ifst_);
    VLOG(1) << "# of enqueued: " << nenqueued_;
    VLOG(1) << "cpmm size: " << close_paren_multimap_.size();
  }

  // Writes the shortest balanced path through the PDT to ofst.
  void ShortestPath(MutableFst<Arc> *ofst) {
    Init(ofst);
    GetDistance(start_);
    GetPath();
    sp_data_.Finish();
    if (error_) ofst->SetProperties(kError, kError);
  }

  const internal::PdtShortestPathData<Arc> &GetShortestPathData() const {
    return sp_data_;
  }

  internal::PdtBalanceData<Arc> *GetBalanceData() { return &balance_data_; }

 public:
  // Hash multimap from close paren label to a paren arc.
  using CloseParenMultimap =
      std::unordered_multimap<internal::ParenState<Arc>, Arc,
                              typename internal::ParenState<Arc>::Hash>;

  const CloseParenMultimap &GetCloseParenMultimap() const {
    return close_paren_multimap_;
  }

 private:
  void Init(MutableFst<Arc> *ofst);
  void GetDistance(StateId start);
  void ProcFinal(SearchState s);
  void ProcArcs(SearchState s);
  void ProcOpenParen(Label paren_id, SearchState s, StateId nexstate,
                     const Weight &weight);
  void ProcCloseParen(Label paren_id, SearchState s, const Weight &weight);
  void ProcNonParen(SearchState s, StateId nextstate, const Weight &weight);
  void Relax(SearchState s, SearchState t, StateId nextstate,
             const Weight &weight, Label paren_id);
  void Enqueue(SearchState d);
  void GetPath();
  Arc GetPathArc(SearchState s, SearchState p, Label paren_id, bool open);

  std::unique_ptr<Fst<Arc>> ifst_;  // Owned copy of the input PDT.
  MutableFst<Arc> *ofst_;           // Output FST; not owned.
  const std::vector<std::pair<Label, Label>> &parens_;
  bool keep_parens_;       // Retain parenthesis labels in the output path?
  Queue *state_queue_;     // Queue of the currently active sub-graph search.
  StateId start_;          // Start state of the input PDT.
  Weight fdistance_;       // Best complete-path distance found so far.
  SearchState f_parent_;   // Final state of the current best complete path.
  SpData sp_data_;         // Per-(state, start) distances/parents/flags.
  std::unordered_map<Label, Label> paren_map_;  // Paren label -> paren ID.
  CloseParenMultimap close_paren_multimap_;
  internal::PdtBalanceData<Arc> balance_data_;
  ssize_t nenqueued_;      // Statistics: total enqueue operations.
  bool error_;

  // Search-state flag bits (disjoint from the kPdt* flags in SpData).
  static constexpr uint8 kEnqueued = 0x10;
  static constexpr uint8 kExpanded = 0x20;
  static constexpr uint8 kFinished = 0x40;

  static const Arc kNoArc;
};
// Resets the output FST and all per-run search state, then indexes the
// parentheses of the input: open parens by destination state (balance_data_)
// and close parens by source state (close_paren_multimap_).
template <class Arc, class Queue>
void PdtShortestPath<Arc, Queue>::Init(MutableFst<Arc> *ofst) {
  ofst_ = ofst;
  ofst->DeleteStates();
  ofst->SetInputSymbols(ifst_->InputSymbols());
  ofst->SetOutputSymbols(ifst_->OutputSymbols());
  if (ifst_->Start() == kNoStateId) return;  // Empty input: nothing to do.
  fdistance_ = Weight::Zero();
  f_parent_ = SearchState(kNoStateId, kNoStateId);
  sp_data_.Clear();
  close_paren_multimap_.clear();
  balance_data_.Clear();
  nenqueued_ = 0;
  // Finds open parens per destination state and close parens per source state.
  for (StateIterator<Fst<Arc>> siter(*ifst_); !siter.Done(); siter.Next()) {
    const auto s = siter.Value();
    for (ArcIterator<Fst<Arc>> aiter(*ifst_, s); !aiter.Done(); aiter.Next()) {
      const auto &arc = aiter.Value();
      const auto it = paren_map_.find(arc.ilabel);
      if (it != paren_map_.end()) {  // Is a paren?
        const auto paren_id = it->second;
        if (arc.ilabel == parens_[paren_id].first) {  // Open paren.
          balance_data_.OpenInsert(paren_id, arc.nextstate);
        } else {  // Close paren.
          const internal::ParenState<Arc> paren_state(paren_id, s);
          close_paren_multimap_.emplace(paren_state, arc);
        }
      }
    }
  }
}
// Computes the shortest distance stored in a recursive way. Each sub-graph
// (i.e., different paren "start" state) begins with weight One(). Note this
// is (indirectly) recursive: expanding an open paren in ProcArcs may trigger
// a nested GetDistance on the sub-graph behind that paren, using its own
// local queue.
template <class Arc, class Queue>
void PdtShortestPath<Arc, Queue>::GetDistance(StateId start) {
  if (start == kNoStateId) return;
  Queue state_queue;         // Local queue for this sub-graph's SSSP.
  state_queue_ = &state_queue;
  const SearchState q(start, start);
  Enqueue(q);
  sp_data_.SetDistance(q, Weight::One());
  while (!state_queue_->Empty()) {
    const auto state = state_queue_->Head();
    state_queue_->Dequeue();
    const SearchState s(state, start);
    sp_data_.SetFlags(s, 0, kEnqueued);  // Clear the enqueued bit.
    ProcFinal(s);
    ProcArcs(s);
    sp_data_.SetFlags(s, kExpanded, kExpanded);
  }
  // Mark the sub-graph as fully solved and release unreachable search data.
  sp_data_.SetFlags(q, kFinished, kFinished);
  balance_data_.FinishInsert(start);
  sp_data_.GC(start);
}
// Updates the best complete path whenever s is a final state of the outermost
// sub-graph (s.start == start_) and improves on the best distance so far.
// The previous best final state, if any, has its kPdtFinal flag cleared so
// that exactly one final state remains flagged.
template <class Arc, class Queue>
void PdtShortestPath<Arc, Queue>::ProcFinal(SearchState s) {
  // Hoisted: Final() was previously called twice per invocation.
  const auto final_weight = ifst_->Final(s.state);
  if (final_weight != Weight::Zero() && s.start == start_) {
    const auto weight = Times(sp_data_.Distance(s), final_weight);
    // In a path semiring, Plus selects the better of the two weights, so an
    // inequality here means the new path is strictly better.
    const auto sum = Plus(fdistance_, weight);
    if (fdistance_ != sum) {
      if (f_parent_.state != kNoStateId) {
        sp_data_.SetFlags(f_parent_, 0, internal::kPdtFinal);
      }
      sp_data_.SetFlags(s, internal::kPdtFinal, internal::kPdtFinal);
      fdistance_ = sum;
      f_parent_ = s;
    }
  }
}
// Processes all arcs leaving the state s, dispatching on the arc label:
// open paren, close paren, or ordinary (non-paren) label.
template <class Arc, class Queue>
void PdtShortestPath<Arc, Queue>::ProcArcs(SearchState s) {
  for (ArcIterator<Fst<Arc>> aiter(*ifst_, s.state); !aiter.Done();
       aiter.Next()) {
    const auto &arc = aiter.Value();
    // Candidate distance through s for this arc.
    const auto weight = Times(sp_data_.Distance(s), arc.weight);
    const auto it = paren_map_.find(arc.ilabel);
    if (it != paren_map_.end()) {  // Is a paren?
      const auto paren_id = it->second;
      if (arc.ilabel == parens_[paren_id].first) {
        ProcOpenParen(paren_id, s, arc.nextstate, weight);
      } else {
        ProcCloseParen(paren_id, s, weight);
      }
    } else {
      ProcNonParen(s, arc.nextstate, weight);
    }
  }
}
// Saves the shortest path info for reaching this parenthesis and starts a new
// SSSP in the sub-graph pointed to by the parenthesis if previously unvisited.
// Otherwise it finds any previously encountered closing parentheses and relaxes
// them using the recursively stored shortest distance to them.
template <class Arc, class Queue>
inline void PdtShortestPath<Arc, Queue>::ProcOpenParen(Label paren_id,
                                                       SearchState s,
                                                       StateId nextstate,
                                                       const Weight &weight) {
  // The open paren's destination becomes the "start" of a new sub-graph.
  const SearchState d(nextstate, nextstate);
  const ParenSpec paren(paren_id, s.start, d.start);
  const auto pdist = sp_data_.Distance(paren);
  if (pdist != Plus(pdist, weight)) {  // Better path to this paren?
    sp_data_.SetDistance(paren, weight);
    sp_data_.SetParent(paren, s);
    const auto dist = sp_data_.Distance(d);
    if (dist == Weight::Zero()) {
      // Sub-graph not yet solved: recurse, saving/restoring the current
      // queue pointer around the nested search.
      auto *state_queue = state_queue_;
      GetDistance(d.start);
      state_queue_ = state_queue;
    } else if (!(sp_data_.Flags(d) & kFinished)) {
      // Re-entering a sub-graph that is still being solved means the PDT's
      // stack is unbounded, so it is not expandable as an FST.
      FSTERROR()
          << "PdtShortestPath: open parenthesis recursion: not bounded stack";
      error_ = true;
    }
    // Relax each known balancing close paren using the (already computed)
    // shortest distance through the sub-graph.
    for (auto set_iter = balance_data_.Find(paren_id, nextstate);
         !set_iter.Done(); set_iter.Next()) {
      const SearchState cpstate(set_iter.Element(), d.start);
      const internal::ParenState<Arc> paren_state(paren_id, cpstate.state);
      for (auto cpit = close_paren_multimap_.find(paren_state);
           cpit != close_paren_multimap_.end() && paren_state == cpit->first;
           ++cpit) {
        const auto &cparc = cpit->second;
        const auto cpw =
            Times(weight, Times(sp_data_.Distance(cpstate), cparc.weight));
        Relax(cpstate, s, cparc.nextstate, cpw, paren_id);
      }
    }
  }
}
// Records that state s is a close-paren source for paren_id within the
// sub-graph started at s.start, the first time s is reached. The actual
// relaxation across the balancing open parenthesis is performed in
// ProcOpenParen; the weight parameter is kept for interface symmetry with
// the other Proc* handlers.
template <class Arc, class Queue>
inline void PdtShortestPath<Arc, Queue>::ProcCloseParen(Label paren_id,
                                                        SearchState s,
                                                        const Weight &weight) {
  // Removed a dead local ParenState that was constructed but never used.
  if (!(sp_data_.Flags(s) & kExpanded)) {
    balance_data_.CloseInsert(paren_id, s.start, s.state);
    // Flag as "final" so GC treats this state as a retention root.
    sp_data_.SetFlags(s, internal::kPdtFinal, internal::kPdtFinal);
  }
}
// Classical relaxation for non-parentheses: source and sub-graph context are
// the same state, and no paren ID is recorded (kNoLabel).
template <class Arc, class Queue>
inline void PdtShortestPath<Arc, Queue>::ProcNonParen(SearchState s,
                                                      StateId nextstate,
                                                      const Weight &weight) {
  Relax(s, s, nextstate, weight, kNoLabel);
}
// Classical relaxation on the search graph for an arc with destination state
// nexstate from state s. State t is in the same sub-graph as nextstate (i.e.,
// has the same paren "start"). paren_id is stored so that GetPath() can later
// reconstruct which parenthesis (if any) this relaxation crossed.
template <class Arc, class Queue>
inline void PdtShortestPath<Arc, Queue>::Relax(SearchState s, SearchState t,
                                               StateId nextstate,
                                               const Weight &weight,
                                               Label paren_id) {
  const SearchState d(nextstate, t.start);
  Weight dist = sp_data_.Distance(d);
  if (dist != Plus(dist, weight)) {  // New path is strictly better.
    sp_data_.SetParent(d, s);
    sp_data_.SetParenId(d, paren_id);
    sp_data_.SetDistance(d, Plus(dist, weight));
    Enqueue(d);
  }
}
// Enqueues s into the current sub-graph queue if not already present;
// otherwise notifies the queue that s's priority may have changed.
template <class Arc, class Queue>
inline void PdtShortestPath<Arc, Queue>::Enqueue(SearchState s) {
  if (!(sp_data_.Flags(s) & kEnqueued)) {
    state_queue_->Enqueue(s.state);
    sp_data_.SetFlags(s, kEnqueued, kEnqueued);
    ++nenqueued_;
  } else {
    state_queue_->Update(s.state);
  }
}
// Follows parent pointers to find the shortest path. A stack is used since the
// shortest distance is stored recursively: when the backward walk reaches the
// start of a sub-graph, the saved open-paren info (pushed when the matching
// close paren was traversed) tells it where to resume in the enclosing
// sub-graph. The output states are created in reverse path order, so the last
// state added becomes the output start state.
template <class Arc, class Queue>
void PdtShortestPath<Arc, Queue>::GetPath() {
  SearchState s = f_parent_;
  SearchState d = SearchState(kNoStateId, kNoStateId);
  StateId s_p = kNoStateId;
  StateId d_p = kNoStateId;
  auto arc = kNoArc;
  Label paren_id = kNoLabel;
  std::stack<ParenSpec> paren_stack;
  while (s.state != kNoStateId) {
    d_p = s_p;
    s_p = ofst_->AddState();
    if (d.state == kNoStateId) {
      // First iteration: s is the final state of the best path.
      ofst_->SetFinal(s_p, ifst_->Final(f_parent_.state));
    } else {
      if (paren_id != kNoLabel) {  // Paren?
        if (arc.ilabel == parens_[paren_id].first) {  // Open paren?
          paren_stack.pop();  // Matched: leave the sub-graph.
        } else {  // Close paren?
          const ParenSpec paren(paren_id, d.start, s.start);
          paren_stack.push(paren);  // Remember where to resume later.
        }
        if (!keep_parens_) arc.ilabel = arc.olabel = 0;  // Epsilon-ize parens.
      }
      arc.nextstate = d_p;
      ofst_->AddArc(s_p, arc);
    }
    d = s;
    s = sp_data_.Parent(d);
    paren_id = sp_data_.ParenId(d);
    if (s.state != kNoStateId) {
      arc = GetPathArc(s, d, paren_id, false);
    } else if (!paren_stack.empty()) {
      // Reached a sub-graph start: resume at the saved open paren's source.
      const ParenSpec paren = paren_stack.top();
      s = sp_data_.Parent(paren);
      paren_id = paren.paren_id;
      arc = GetPathArc(s, d, paren_id, true);
    }
  }
  ofst_->SetStart(s_p);
  ofst_->SetProperties(
      ShortestPathProperties(ofst_->Properties(kFstProperties, false)),
      kFstProperties);
}
// Finds transition with least weight between two states with label matching
// paren_id and open/close paren type or a non-paren if kNoLabel. Sets the
// error flag if no such arc exists (which would indicate corrupted search
// data).
template <class Arc, class Queue>
Arc PdtShortestPath<Arc, Queue>::GetPathArc(SearchState s, SearchState d,
                                            Label paren_id, bool open_paren) {
  auto path_arc = kNoArc;
  for (ArcIterator<Fst<Arc>> aiter(*ifst_, s.state); !aiter.Done();
       aiter.Next()) {
    const auto &arc = aiter.Value();
    if (arc.nextstate != d.state) continue;
    Label arc_paren_id = kNoLabel;
    const auto it = paren_map_.find(arc.ilabel);
    if (it != paren_map_.end()) {
      arc_paren_id = it->second;
      bool arc_open_paren = (arc.ilabel == parens_[arc_paren_id].first);
      if (arc_open_paren != open_paren) continue;  // Wrong paren direction.
    }
    if (arc_paren_id != paren_id) continue;
    // Keep the Plus-preferred (lowest in a path semiring) weight.
    if (arc.weight == Plus(arc.weight, path_arc.weight)) path_arc = arc;
  }
  if (path_arc.nextstate == kNoStateId) {
    FSTERROR() << "PdtShortestPath::GetPathArc: Failed to find arc";
    error_ = true;
  }
  return path_arc;
}
// Sentinel arc used to mean "no arc found"; its nextstate is kNoStateId.
template <class Arc, class Queue>
const Arc PdtShortestPath<Arc, Queue>::kNoArc = Arc(kNoLabel, kNoLabel,
                                                    Weight::Zero(), kNoStateId);
// Functional variants.
// Computes the shortest balanced path through the PDT (ifst, parens) into
// ofst, using the queue discipline and options supplied in opts.
template <class Arc, class Queue>
void ShortestPath(
    const Fst<Arc> &ifst,
    const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
        &parens,
    MutableFst<Arc> *ofst, const PdtShortestPathOptions<Arc, Queue> &opts) {
  PdtShortestPath<Arc, Queue> psp(ifst, parens, opts);
  psp.ShortestPath(ofst);
}
// Convenience overload using a FIFO queue and default options.
template <class Arc>
void ShortestPath(
    const Fst<Arc> &ifst,
    const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
        &parens,
    MutableFst<Arc> *ofst) {
  using Q = FifoQueue<typename Arc::StateId>;
  const PdtShortestPathOptions<Arc, Q> opts;
  PdtShortestPath<Arc, Q> psp(ifst, parens, opts);
  psp.ShortestPath(ofst);
}
} // namespace fst
#endif // FST_EXTENSIONS_PDT_SHORTEST_PATH_H_
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/lm/search_hashed.cc | #include "lm/search_hashed.hh"
#include "lm/binary_format.hh"
#include "lm/blank.hh"
#include "lm/lm_exception.hh"
#include "lm/model.hh"
#include "lm/read_arpa.hh"
#include "lm/value.hh"
#include "lm/vocab.hh"
#include "util/bit_packing.hh"
#include "util/file_piece.hh"
#include <string>
namespace lm {
namespace ngram {
class ProbingModel;
namespace {
/* These are passed to ReadNGrams so that n-grams with zero backoff that appear as context will still be used in state. */
// Functor for middle orders (n >= 3): hashes the context words
// vocab_ids[1..n) and marks the corresponding (n-1)-gram entry's backoff as
// "extends" via SetExtension.
template <class Middle> class ActivateLowerMiddle {
  public:
    explicit ActivateLowerMiddle(Middle &middle) : modify_(middle) {}

    void operator()(const WordIndex *vocab_ids, const unsigned int n) {
      // Hash of the context: words 1 through n-1 (word 0 is the new word).
      uint64_t hash = static_cast<WordIndex>(vocab_ids[1]);
      for (const WordIndex *i = vocab_ids + 2; i < vocab_ids + n; ++i) {
        hash = detail::CombineWordHash(hash, *i);
      }
      typename Middle::MutableIterator i;
      // TODO: somehow get text of n-gram for this error message.
      if (!modify_.UnsafeMutableFind(hash, i))
        UTIL_THROW(FormatLoadException, "The context of every " << n << "-gram should appear as a " << (n-1) << "-gram");
      SetExtension(i->value.backoff);
    }

  private:
    Middle &modify_;
};
// Functor for the bigram case: the context is a single word, so the unigram
// array can be indexed directly instead of hashed.
template <class Weights> class ActivateUnigram {
  public:
    explicit ActivateUnigram(Weights *unigram) : modify_(unigram) {}

    void operator()(const WordIndex *vocab_ids, const unsigned int /*n*/) {
      // assert(n == 2);
      SetExtension(modify_[vocab_ids[1]].backoff);
    }

  private:
    Weights *modify_;  // Unigram weights array; not owned.
};
// Find the lower order entry, inserting blanks along the way as necessary.
// Walks from the longest right-aligned sub-gram down toward the unigram,
// collecting (and creating, where missing) the entries that the new n-gram
// extends; the pointers are appended to `between` for AdjustLower to fix up.
template <class Value> void FindLower(
    const std::vector<uint64_t> &keys,
    typename Value::Weights &unigram,
    std::vector<util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash> > &middle,
    std::vector<typename Value::Weights *> &between) {
  typename util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash>::MutableIterator iter;
  typename Value::ProbingEntry entry;
  // Backoff will always be 0.0. We'll get the probability and rest in another pass.
  entry.value.backoff = kNoExtensionBackoff;
  // Go back and find the longest right-aligned entry, informing it that it extends left. Normally this will match immediately, but sometimes SRI is dumb.
  for (int lower = keys.size() - 2; ; --lower) {
    if (lower == -1) {
      // Ran out of middle orders: fall back to the unigram.
      between.push_back(&unigram);
      return;
    }
    entry.key = keys[lower];
    bool found = middle[lower].FindOrInsert(entry, iter);
    between.push_back(&iter->value);
    if (found) return;  // Existing entry: no blanks needed below this order.
  }
}
// Between usually has single entry, the value to adjust. But sometimes SRI stupidly pruned entries so it has unitialized blank values to be set here.
// The blanks (all but the last element of `between`) get probabilities
// hallucinated from the longest surviving lower-order entry plus backoffs,
// and every collected entry is marked as left-extendable.
template <class Added, class Build> void AdjustLower(
    const Added &added,
    const Build &build,
    std::vector<typename Build::Value::Weights *> &between,
    const unsigned int n,
    const std::vector<WordIndex> &vocab_ids,
    typename Build::Value::Weights *unigrams,
    std::vector<util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash> > &middle) {
  typedef typename Build::Value Value;
  if (between.size() == 1) {
    // Common case: the immediate lower-order entry already existed.
    build.MarkExtends(*between.front(), added);
    return;
  }
  typedef util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash> Middle;
  // between.back() is the longest entry that actually existed; its stored
  // prob may carry a sign-bit flag, hence fabs.
  float prob = -fabs(between.back()->prob);
  // Order of the n-gram on which probabilities are based.
  unsigned char basis = n - between.size();
  assert(basis != 0);
  typename Build::Value::Weights **change = &between.back();
  // Skip the basis.
  --change;
  if (basis == 1) {
    // Hallucinate a bigram based on a unigram's backoff and a unigram probability.
    float &backoff = unigrams[vocab_ids[1]].backoff;
    SetExtension(backoff);
    prob += backoff;
    (*change)->prob = prob;
    build.SetRest(&*vocab_ids.begin(), 2, **change);
    basis = 2;
    --change;
  }
  // Hash of the context words, grown one word at a time as basis increases.
  uint64_t backoff_hash = static_cast<uint64_t>(vocab_ids[1]);
  for (unsigned char i = 2; i <= basis; ++i) {
    backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[i]);
  }
  for (; basis < n - 1; ++basis, --change) {
    typename Middle::MutableIterator gotit;
    if (middle[basis - 2].UnsafeMutableFind(backoff_hash, gotit)) {
      float &backoff = gotit->value.backoff;
      SetExtension(backoff);
      prob += backoff;
    }
    (*change)->prob = prob;
    build.SetRest(&*vocab_ids.begin(), basis + 1, **change);
    backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[basis+1]);
  }

  // Mark the chain: each entry extends to the next-longer one.
  typename std::vector<typename Value::Weights *>::const_iterator i(between.begin());
  build.MarkExtends(**i, added);
  const typename Value::Weights *longer = *i;
  // Everything has probability but is not marked as extending.
  for (++i; i != between.end(); ++i) {
    build.MarkExtends(**i, *longer);
    longer = *i;
  }
}
// Continue marking lower entries even they know that they extend left. This is used for upper/lower bounds.
// Stops as soon as MarkExtends reports no change, so the walk terminates
// early for builds that do not track bounds.
template <class Build> void MarkLower(
    const std::vector<uint64_t> &keys,
    const Build &build,
    typename Build::Value::Weights &unigram,
    std::vector<util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash> > &middle,
    int start_order,
    const typename Build::Value::Weights &longer) {
  if (start_order == 0) return;
  // Hopefully the compiler will realize that if MarkExtends always returns false, it can simplify this code.
  for (int even_lower = start_order - 2 /* index in middle */; ; --even_lower) {
    if (even_lower == -1) {
      build.MarkExtends(unigram, longer);
      return;
    }
    if (!build.MarkExtends(
          middle[even_lower].UnsafeMutableMustFind(keys[even_lower])->value,
          longer)) return;
  }
}
// Reads the order-n section of an ARPA file and inserts each n-gram into
// `store`, keying by a combined hash of the (reversed) word IDs. Lower-order
// entries are located/created (FindLower), their probabilities fixed up
// (AdjustLower), optionally bound-marked (MarkLower), and `activate` flags
// the context entry so it participates in state.
template <class Build, class Activate, class Store> void ReadNGrams(
    util::FilePiece &f,
    const unsigned int n,
    const size_t count,
    const ProbingVocabulary &vocab,
    const Build &build,
    typename Build::Value::Weights *unigrams,
    std::vector<util::ProbingHashTable<typename Build::Value::ProbingEntry, util::IdentityHash> > &middle,
    Activate activate,
    Store &store,
    PositiveProbWarn &warn) {
  typedef typename Build::Value Value;
  assert(n >= 2);
  ReadNGramHeader(f, n);

  // Both vocab_ids and keys are non-empty because n >= 2.
  // vocab ids of words in reverse order.
  std::vector<WordIndex> vocab_ids(n);
  std::vector<uint64_t> keys(n-1);
  typename Store::Entry entry;
  std::vector<typename Value::Weights *> between;
  for (size_t i = 0; i < count; ++i) {
    ReadNGram(f, n, vocab, vocab_ids.rbegin(), entry.value, warn);
    build.SetRest(&*vocab_ids.begin(), n, entry.value);

    // keys[h] hashes the first h+2 (reversed) words; keys[n-2] is the full
    // n-gram key.
    keys[0] = detail::CombineWordHash(static_cast<uint64_t>(vocab_ids.front()), vocab_ids[1]);
    for (unsigned int h = 1; h < n - 1; ++h) {
      keys[h] = detail::CombineWordHash(keys[h-1], vocab_ids[h+1]);
    }
    // Initially the sign bit is on, indicating it does not extend left. Most already have this but there might +0.0.
    util::SetSign(entry.value.prob);
    entry.key = keys[n-2];

    store.Insert(entry);
    between.clear();
    FindLower<Value>(keys, unigrams[vocab_ids.front()], middle, between);
    AdjustLower<typename Store::Entry::Value, Build>(entry.value, build, between, n, vocab_ids, unigrams, middle);
    if (Build::kMarkEvenLower) MarkLower<Build>(keys, build, unigrams[vocab_ids.front()], middle, n - between.size() - 1, *between.back());
    activate(&*vocab_ids.begin(), n);
  }

  store.FinishedInserting();
}
} // namespace
namespace detail {
// Carves the flat memory region at `start` into the unigram array, one
// probing hash table per middle order, and the longest-order table.
// Returns a pointer just past the consumed region.
template <class Value> uint8_t *HashedSearch<Value>::SetupMemory(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config) {
  unigram_ = Unigram(start, counts[0]);
  start += Unigram::Size(counts[0]);
  std::size_t allocated;
  middle_.clear();
  // Orders 2 .. N-1 live in probing tables sized by the configured multiplier.
  for (unsigned int n = 2; n < counts.size(); ++n) {
    allocated = Middle::Size(counts[n - 1], config.probing_multiplier);
    middle_.push_back(Middle(start, allocated));
    start += allocated;
  }
  allocated = Longest::Size(counts.back(), config.probing_multiplier);
  longest_ = Longest(start, allocated);
  start += allocated;
  return start;
}
/*template <class Value> void HashedSearch<Value>::Relocate(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config) {
unigram_ = Unigram(start, counts[0]);
start += Unigram::Size(counts[0]);
for (unsigned int n = 2; n < counts.size(); ++n) {
middle[n-2].Relocate(start);
start += Middle::Size(counts[n - 1], config.probing_multiplier)
}
longest_.Relocate(start);
}*/
// Builds the probing search structure from an open ARPA file: grows the
// backing memory (which may relocate the vocabulary), lays out the tables,
// reads unigrams, then dispatches on the configured rest-cost strategy for
// the higher orders.
template <class Value> void HashedSearch<Value>::InitializeFromARPA(const char * /*file*/, util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, ProbingVocabulary &vocab, BinaryFormat &backing) {
  void *vocab_rebase;
  void *search_base = backing.GrowForSearch(Size(counts, config), vocab.UnkCountChangePadding(), vocab_rebase);
  vocab.Relocate(vocab_rebase);
  SetupMemory(reinterpret_cast<uint8_t*>(search_base), counts, config);

  PositiveProbWarn warn(config.positive_log_probability);
  Read1Grams(f, counts[0], vocab, unigram_.Raw(), warn);
  CheckSpecials(config, vocab);
  DispatchBuild(f, counts, config, vocab, warn);
}
// Backoff-only models have no rest costs, so there is nothing to dispatch on.
template <> void HashedSearch<BackoffValue>::DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) {
  NoRestBuild build;
  ApplyBuild(f, counts, vocab, warn, build);
}
// Rest-cost models choose the builder from config.rest_function; any other
// enum value silently builds nothing here (presumably validated upstream —
// TODO confirm).
template <> void HashedSearch<RestValue>::DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) {
  switch (config.rest_function) {
    case Config::REST_MAX:
      {
        MaxRestBuild build;
        ApplyBuild(f, counts, vocab, warn, build);
      }
      break;
    case Config::REST_LOWER:
      {
        // Rest costs come from a separately loaded lower-order model.
        LowerRestBuild<ProbingModel> build(config, counts.size(), vocab);
        ApplyBuild(f, counts, vocab, warn, build);
      }
      break;
  }
}
// Reads every order above 1 with the chosen builder. Bigrams activate
// unigram contexts directly; higher orders activate the next-lower middle
// table; the highest order goes into the longest_ table. A probing-size
// overflow is translated into an actionable error message about pruning.
template <class Value> template <class Build> void HashedSearch<Value>::ApplyBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const ProbingVocabulary &vocab, PositiveProbWarn &warn, const Build &build) {
  // Initialize rest costs for all unigrams before reading higher orders.
  for (WordIndex i = 0; i < counts[0]; ++i) {
    build.SetRest(&i, (unsigned int)1, unigram_.Raw()[i]);
  }

  try {
    if (counts.size() > 2) {
      ReadNGrams<Build, ActivateUnigram<typename Value::Weights>, Middle>(
          f, 2, counts[1], vocab, build, unigram_.Raw(), middle_, ActivateUnigram<typename Value::Weights>(unigram_.Raw()), middle_[0], warn);
    }
    for (unsigned int n = 3; n < counts.size(); ++n) {
      ReadNGrams<Build, ActivateLowerMiddle<Middle>, Middle>(
          f, n, counts[n-1], vocab, build, unigram_.Raw(), middle_, ActivateLowerMiddle<Middle>(middle_[n-3]), middle_[n-2], warn);
    }
    if (counts.size() > 2) {
      ReadNGrams<Build, ActivateLowerMiddle<Middle>, Longest>(
          f, counts.size(), counts[counts.size() - 1], vocab, build, unigram_.Raw(), middle_, ActivateLowerMiddle<Middle>(middle_.back()), longest_, warn);
    } else {
      // Bigram-only model: the longest order activates unigrams directly.
      ReadNGrams<Build, ActivateUnigram<typename Value::Weights>, Longest>(
          f, counts.size(), counts[counts.size() - 1], vocab, build, unigram_.Raw(), middle_, ActivateUnigram<typename Value::Weights>(unigram_.Raw()), longest_, warn);
    }
  } catch (util::ProbingSizeException &e) {
    UTIL_THROW(util::ProbingSizeException, "Avoid pruning n-grams like \"bar baz quux\" when \"foo bar baz quux\" is still in the model. KenLM will work when this pruning happens, but the probing model assumes these events are rare enough that using blank space in the probing hash table will cover all of them. Increase probing_multiplier (-p to build_binary) to add more blank spaces.\n");
  }
  ReadEnd(f);
}
// Explicit instantiations for the two value types used by the probing models.
template class HashedSearch<BackoffValue>;
template class HashedSearch<RestValue>;
} // namespace detail
} // namespace ngram
} // namespace lm
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/util/spaces.cc | #include "util/spaces.hh"
namespace util {
// Lookup table indexed by (unsigned) byte value: true exactly for the six
// characters that isspace() accepts in the C locale — ' ', '\f', '\n', '\r',
// '\t', and '\v'. Spelled out literally because that is the only portable way
// to get a fully const bool array initialized at load time.
const bool kSpaces[256] = {0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
} // namespace util
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/ci_scripts/android-package.sh | #!/bin/bash
set -xe
source $(dirname "$0")/all-vars.sh
source $(dirname "$0")/package-utils.sh
mkdir -p ${CI_ARTIFACTS_DIR} || true
cp ${DS_DSDIR}/tensorflow/bazel*.log ${CI_ARTIFACTS_DIR}/
arm_flavor=$1
package_native_client_ndk "native_client.tar.xz" "${arm_flavor}"
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/tc-decision_reqs.txt | json-e == 2.3.1
networkx
pyaml
requests
slugid == 1.0.7
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/mpdt/expand.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Expands an MPDT to an FST.
#ifndef FST_EXTENSIONS_MPDT_EXPAND_H_
#define FST_EXTENSIONS_MPDT_EXPAND_H_
#include <vector>
#include <fst/extensions/mpdt/mpdt.h>
#include <fst/extensions/pdt/paren.h>
#include <fst/cache.h>
#include <fst/mutable-fst.h>
#include <fst/queue.h>
#include <fst/state-table.h>
#include <fst/test-properties.h>
namespace fst {
template <class Arc>
struct MPdtExpandFstOptions : public CacheOptions {
  // If true, parenthesis labels are kept on the expanded arcs; otherwise the
  // stack push/pop transitions are relabeled as epsilons.
  bool keep_parentheses;
  // Optional caller-owned paren stack; when null, the expansion allocates and
  // owns its own.
  internal::MPdtStack<typename Arc::StateId, typename Arc::Label> *stack;
  // Optional caller-owned state table; when null, one is allocated and owned.
  PdtStateTable<typename Arc::StateId, typename Arc::StateId> *state_table;

  MPdtExpandFstOptions(
      const CacheOptions &opts = CacheOptions(), bool kp = false,
      internal::MPdtStack<typename Arc::StateId, typename Arc::Label> *s =
          nullptr,
      PdtStateTable<typename Arc::StateId, typename Arc::StateId> *st = nullptr)
      : CacheOptions(opts), keep_parentheses(kp), stack(s), state_table(st) {}
};
// Properties for an expanded PDT: of the input FST's properties, only
// acceptor-ness, (initial-)acyclicity, and unweightedness carry over.
inline uint64_t MPdtExpandProperties(uint64_t inprops) {
  const uint64_t retained = kAcceptor | kAcyclic | kInitialAcyclic | kUnweighted;
  return inprops & retained;
}
namespace internal {
// Implementation class for ExpandFst
template <class Arc>
class MPdtExpandFstImpl : public CacheImpl<Arc> {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using StackId = StateId;
using StateTuple = PdtStateTuple<StateId, StackId>;
using ParenStack = internal::MPdtStack<StateId, Label>;
using FstImpl<Arc>::SetType;
using FstImpl<Arc>::SetProperties;
using FstImpl<Arc>::Properties;
using FstImpl<Arc>::SetInputSymbols;
using FstImpl<Arc>::SetOutputSymbols;
using CacheBaseImpl<CacheState<Arc>>::PushArc;
using CacheBaseImpl<CacheState<Arc>>::HasArcs;
using CacheBaseImpl<CacheState<Arc>>::HasFinal;
using CacheBaseImpl<CacheState<Arc>>::HasStart;
using CacheBaseImpl<CacheState<Arc>>::SetArcs;
using CacheBaseImpl<CacheState<Arc>>::SetFinal;
using CacheBaseImpl<CacheState<Arc>>::SetStart;
MPdtExpandFstImpl(const Fst<Arc> &fst,
const std::vector<std::pair<Label, Label>> &parens,
const std::vector<Label> &assignments,
const MPdtExpandFstOptions<Arc> &opts)
: CacheImpl<Arc>(opts),
fst_(fst.Copy()),
stack_(opts.stack ? opts.stack : new ParenStack(parens, assignments)),
state_table_(opts.state_table ? opts.state_table
: new PdtStateTable<StateId, StackId>()),
own_stack_(!opts.stack),
own_state_table_(!opts.state_table),
keep_parentheses_(opts.keep_parentheses) {
SetType("expand");
const auto props = fst.Properties(kFstProperties, false);
SetProperties(MPdtExpandProperties(props), kCopyProperties);
SetInputSymbols(fst.InputSymbols());
SetOutputSymbols(fst.OutputSymbols());
}
MPdtExpandFstImpl(const MPdtExpandFstImpl &impl)
: CacheImpl<Arc>(impl),
fst_(impl.fst_->Copy(true)),
stack_(new ParenStack(*impl.stack_)),
state_table_(new PdtStateTable<StateId, StackId>()),
own_stack_(true),
own_state_table_(true),
keep_parentheses_(impl.keep_parentheses_) {
SetType("expand");
SetProperties(impl.Properties(), kCopyProperties);
SetInputSymbols(impl.InputSymbols());
SetOutputSymbols(impl.OutputSymbols());
}
~MPdtExpandFstImpl() override {
if (own_stack_) delete stack_;
if (own_state_table_) delete state_table_;
}
StateId Start() {
if (!HasStart()) {
const auto s = fst_->Start();
if (s == kNoStateId) return kNoStateId;
const StateTuple tuple(s, 0);
const auto start = state_table_->FindState(tuple);
SetStart(start);
}
return CacheImpl<Arc>::Start();
}
Weight Final(StateId s) {
if (!HasFinal(s)) {
const auto &tuple = state_table_->Tuple(s);
const auto weight = fst_->Final(tuple.state_id);
SetFinal(s,
(weight != Weight::Zero() && tuple.stack_id == 0)
? weight
: Weight::Zero());
}
return CacheImpl<Arc>::Final(s);
}
size_t NumArcs(StateId s) {
if (!HasArcs(s)) ExpandState(s);
return CacheImpl<Arc>::NumArcs(s);
}
size_t NumInputEpsilons(StateId s) {
if (!HasArcs(s)) ExpandState(s);
return CacheImpl<Arc>::NumInputEpsilons(s);
}
size_t NumOutputEpsilons(StateId s) {
if (!HasArcs(s)) ExpandState(s);
return CacheImpl<Arc>::NumOutputEpsilons(s);
}
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) {
if (!HasArcs(s)) ExpandState(s);
CacheImpl<Arc>::InitArcIterator(s, data);
}
// Computes the outgoing transitions from a state, creating new destination
// states as needed.
void ExpandState(StateId s) {
const auto tuple = state_table_->Tuple(s);
for (ArcIterator<Fst<Arc>> aiter(*fst_, tuple.state_id); !aiter.Done();
aiter.Next()) {
auto arc = aiter.Value();
const auto stack_id = stack_->Find(tuple.stack_id, arc.ilabel);
if (stack_id == -1) {
continue; // Non-matching close parenthesis.
} else if ((stack_id != tuple.stack_id) && !keep_parentheses_) {
arc.ilabel = arc.olabel = 0; // Stack push/pop.
}
const StateTuple ntuple(arc.nextstate, stack_id);
arc.nextstate = state_table_->FindState(ntuple);
PushArc(s, arc);
}
SetArcs(s);
}
const ParenStack &GetStack() const { return *stack_; }
const PdtStateTable<StateId, StackId> &GetStateTable() const {
return *state_table_;
}
private:
std::unique_ptr<const Fst<Arc>> fst_;
ParenStack *stack_;
PdtStateTable<StateId, StackId> *state_table_;
const bool own_stack_;
const bool own_state_table_;
const bool keep_parentheses_;
MPdtExpandFstImpl &operator=(const MPdtExpandFstImpl &) = delete;
};
} // namespace internal
// Expands a multi-pushdown transducer (MPDT) encoded as an FST into an FST.
// This version is a delayed FST. In the MPDT, some transitions are labeled with
// open or close parentheses. To be interpreted as an MPDT, the parens for each
// stack must balance on a path. The open-close parenthesis label
// pairs are passed using the parens argument, and the assignment of those pairs
// to stacks is passed using the assignments argument. Expansion enforces the
// parenthesis constraints. The MPDT must be
// expandable as an FST.
//
// This class attaches interface to implementation and handles
// reference counting, delegating most methods to ImplToFst.
template <class A>
class MPdtExpandFst : public ImplToFst<internal::MPdtExpandFstImpl<A>> {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using StackId = StateId;
  using ParenStack = internal::MPdtStack<StackId, Label>;
  using Store = DefaultCacheStore<Arc>;
  using State = typename Store::State;
  using Impl = internal::MPdtExpandFstImpl<Arc>;
  friend class ArcIterator<MPdtExpandFst<Arc>>;
  friend class StateIterator<MPdtExpandFst<Arc>>;
  // Expands with default (cache) options; parens gives the open/close
  // parenthesis label pairs and assignments maps each pair to a stack.
  MPdtExpandFst(const Fst<Arc> &fst,
                const std::vector<std::pair<Label, Label>> &parens,
                const std::vector<Label> &assignments)
      : ImplToFst<Impl>(std::make_shared<Impl>(fst, parens, assignments,
                                               MPdtExpandFstOptions<Arc>())) {}
  // As above, but with caller-supplied expansion/cache options.
  MPdtExpandFst(const Fst<Arc> &fst,
                const std::vector<std::pair<Label, Label>> &parens,
                const std::vector<Label> &assignments,
                const MPdtExpandFstOptions<Arc> &opts)
      : ImplToFst<Impl>(
            std::make_shared<Impl>(fst, parens, assignments, opts)) {}
  // See Fst<>::Copy() for doc.
  MPdtExpandFst(const MPdtExpandFst<Arc> &fst, bool safe = false)
      : ImplToFst<Impl>(fst, safe) {}
  // Get a copy of this ExpandFst. See Fst<>::Copy() for further doc.
  MPdtExpandFst<Arc> *Copy(bool safe = false) const override {
    return new MPdtExpandFst<A>(*this, safe);
  }
  inline void InitStateIterator(StateIteratorData<Arc> *data) const override;
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
    GetMutableImpl()->InitArcIterator(s, data);
  }
  // Accessors for the underlying stack and state table (owned by the impl).
  const ParenStack &GetStack() const { return GetImpl()->GetStack(); }
  const PdtStateTable<StateId, StackId> &GetStateTable() const {
    return GetImpl()->GetStateTable();
  }

 private:
  using ImplToFst<Impl>::GetImpl;
  using ImplToFst<Impl>::GetMutableImpl;
  void operator=(const MPdtExpandFst &) = delete;
};
// Specialization for MPdtExpandFst.
// State iterator specialization for MPdtExpandFst; iterates over the
// lazily materialized (cached) states.
template <class Arc>
class StateIterator<MPdtExpandFst<Arc>>
    : public CacheStateIterator<MPdtExpandFst<Arc>> {
 public:
  explicit StateIterator(const MPdtExpandFst<Arc> &fst)
      : CacheStateIterator<MPdtExpandFst<Arc>>(fst, fst.GetMutableImpl()) {}
};
// Specialization for MPdtExpandFst.
// Arc iterator specialization for MPdtExpandFst; forces expansion of the
// state on construction so the cached arcs are available.
template <class Arc>
class ArcIterator<MPdtExpandFst<Arc>>
    : public CacheArcIterator<MPdtExpandFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;
  ArcIterator(const MPdtExpandFst<Arc> &fst, StateId s)
      : CacheArcIterator<MPdtExpandFst<Arc>>(fst.GetMutableImpl(), s) {
    if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->ExpandState(s);
  }
};
// Hooks the cache-backed state iterator into the generic Fst interface.
template <class Arc>
inline void MPdtExpandFst<Arc>::InitStateIterator(
    StateIteratorData<Arc> *data) const {
  data->base = new StateIterator<MPdtExpandFst<Arc>>(*this);
}
// Options for the copying (eager) MPDT Expand() below.
struct MPdtExpandOptions {
  bool connect;           // Trim the output after expansion?
  bool keep_parentheses;  // Keep parentheses instead of epsilon-rewriting?
  explicit MPdtExpandOptions(bool connect = true, bool keep_parentheses = false)
      : connect(connect), keep_parentheses(keep_parentheses) {}
};
// Expands a multi-pushdown transducer (MPDT) encoded as an FST into an FST.
// This version writes the expanded PDT to a mutable FST. In the MPDT, some
// transitions are labeled with open or close parentheses. To be interpreted as
// an MPDT, the parens for each stack must balance on a path. The open-close
// parenthesis label pair sets are passed using the parens argument, and the
// assignment of those pairs to stacks is passed using the assignments argument.
// The expansion enforces the parenthesis constraints. The MPDT must be
// expandable as an FST.
template <class Arc>
void Expand(const Fst<Arc> &ifst,
const std::vector<
std::pair<typename Arc::Label, typename Arc::Label>> &parens,
const std::vector<typename Arc::Label> &assignments,
MutableFst<Arc> *ofst, const MPdtExpandOptions &opts) {
MPdtExpandFstOptions<Arc> eopts;
eopts.gc_limit = 0;
eopts.keep_parentheses = opts.keep_parentheses;
*ofst = MPdtExpandFst<Arc>(ifst, parens, assignments, eopts);
if (opts.connect) Connect(ofst);
}
// Expands a multi-pushdown transducer (MPDT) encoded as an FST into an FST.
// This version writes the expanded PDT to a mutable FST. In the MPDT, some
// transitions are labeled with open or close parentheses. To be interpreted as
// an MPDT, the parens for each stack must balance on a path. The open-close
// parenthesis label pair sets are passed using the parens argument, and the
// assignment of those pairs to stacks is passed using the assignments argument.
// The expansion enforces the parenthesis constraints. The MPDT must be
// expandable as an FST.
template <class Arc>
void Expand(const Fst<Arc> &ifst,
const std::vector<std::pair<typename Arc::Label,
typename Arc::Label>> &parens,
const std::vector<typename Arc::Label> &assignments,
MutableFst<Arc> *ofst, bool connect = true,
bool keep_parentheses = false) {
const MPdtExpandOptions opts(connect, keep_parentheses);
Expand(ifst, parens, assignments, ofst, opts);
}
} // namespace fst
#endif // FST_EXTENSIONS_MPDT_EXPAND_H_
| 0 |
coqui_public_repos/TTS/TTS/tts/utils/text | coqui_public_repos/TTS/TTS/tts/utils/text/bangla/phonemizer.py | import re
import bangla
from bnnumerizer import numerize
from bnunicodenormalizer import Normalizer
# initialize
bnorm = Normalizer()
attribution_dict = {
"সাঃ": "সাল্লাল্লাহু আলাইহি ওয়া সাল্লাম",
"আঃ": "আলাইহিস সালাম",
"রাঃ": "রাদিআল্লাহু আনহু",
"রহঃ": "রহমাতুল্লাহি আলাইহি",
"রহিঃ": "রহিমাহুল্লাহ",
"হাফিঃ": "হাফিযাহুল্লাহ",
"বায়ান": "বাইআন",
"দাঃবাঃ": "দামাত বারাকাতুহুম,দামাত বারাকাতুল্লাহ",
# "আয়াত" : "আইআত",#আইআত
# "ওয়া" : "ওআ",
# "ওয়াসাল্লাম" : "ওআসাল্লাম",
# "কেন" : "কেনো",
# "কোন" : "কোনো",
# "বল" : "বলো",
# "চল" : "চলো",
# "কর" : "করো",
# "রাখ" : "রাখো",
"’": "",
"‘": "",
# "য়" : "অ",
# "সম্প্রদায়" : "সম্প্রদাই",
# "রয়েছে" : "রইছে",
# "রয়েছ" : "রইছ",
"/": " বাই ",
}
def tag_text(text: str):
    """Collapse repeated spaces and run the (vestigial) tagging pass.

    The implementation wraps the text in ``start``/``end`` sentinels, finds
    the spans between Arabic-script runs, re-inserts them unchanged, and
    strips the sentinels again.  Because the re-insertion is an identity
    replacement, the only observable effect is space collapsing — plus the
    removal of any literal ``start``/``end`` substrings of the input, which
    is preserved here for byte-for-byte compatibility.
    """
    collapsed = re.sub(" +", " ", text)
    tagged = "start" + collapsed + "end"
    # Spans of non-Arabic text (unique, non-blank).
    spans = {part for part in re.split("[\u0600-\u06FF]+", tagged) if part.strip()}
    for span in spans:
        if len(span.strip()) > 1:
            tagged = tagged.replace(span, f"{span}")
    # Remove the sentinels (and, as a side effect, any other occurrences).
    return tagged.replace("start", "").replace("end", "")
def normalize(sen):
    """Normalize each whitespace-separated token of ``sen`` with bnunicodenormalizer.

    Tokens the normalizer cannot handle (``normalized`` is ``None``) are
    dropped from the output.

    Args:
        sen: Input sentence (Bangla text).

    Returns:
        The sentence rebuilt from the normalized tokens, joined by spaces.
    """
    # No ``global`` statement is needed here: the function only *reads* the
    # module-level ``bnorm`` instance, so the previous ``global bnorm``
    # declaration (and its pylint pragma) was redundant.
    normalized_words = (bnorm(word)["normalized"] for word in sen.split())
    return " ".join(word for word in normalized_words if word is not None)
def expand_full_attribution(text):
    """Replace each known abbreviation in ``text`` with its normalized expansion.

    The containment check is kept so that ``normalize`` is only invoked for
    abbreviations actually present in the text.
    """
    for abbreviation, expansion in attribution_dict.items():
        if abbreviation not in text:
            continue
        text = text.replace(abbreviation, normalize(expansion))
    return text
def collapse_whitespace(text):
    """Collapse every run of whitespace in ``text`` to a single space.

    The previous version built a fresh compiled pattern on every call;
    passing the pattern string straight to ``re.sub`` reuses the entry in
    ``re``'s internal compiled-pattern cache instead.
    """
    return re.sub(r"\s+", " ", text)
def bangla_text_to_phonemes(text: str) -> str:
    """Convert Bangla ``text`` to a normalized, phonemizer-ready string.

    Pipeline: ASCII digits -> Bangla digits, spoken form for ``:`` between
    digits, number words via ``numerize``, whitespace cleanup, then
    per-sentence normalization and abbreviation expansion.  Every sentence
    in the output is terminated with a danda ("।"); note this also appends
    one for the empty trailing split when the input already ends with a
    sentence-ending character.
    """
    # english numbers to bangla conversion
    res = re.search("[0-9]", text)
    if res is not None:
        text = bangla.convert_english_digit_to_bangla_digit(text)

    # replace ':' in between two bangla numbers with ' এর '
    # NOTE(review): the commas and spaces are literal members of the
    # character classes, so sequences like "১ : ২" would also match —
    # presumably unintended; verify before tightening.
    pattern = r"[০, ১, ২, ৩, ৪, ৫, ৬, ৭, ৮, ৯]:[০, ১, ২, ৩, ৪, ৫, ৬, ৭, ৮, ৯]"
    matches = re.findall(pattern, text)
    for m in matches:
        r = m.replace(":", " এর ")
        text = text.replace(m, r)

    # numerize text
    text = numerize(text)

    # tag sections
    text = tag_text(text)

    # text blocks
    # blocks = text.split("")
    # blocks = [b for b in blocks if b.strip()]

    # create tuple of (lang,text)
    # NOTE(review): '"" in text' is always True and replacing the empty
    # string is a no-op; this looks like a remnant of removed language-tag
    # markup. Kept as-is to preserve behavior.
    if "" in text:
        text = text.replace("", "").replace("", "")

    # Split based on sentence ending Characters
    bn_text = text.strip()
    sentenceEnders = re.compile("[।!?]")
    sentences = sentenceEnders.split(str(bn_text))

    data = ""
    for sent in sentences:
        res = re.sub("\n", "", sent)
        res = normalize(res)
        # expand attributes
        res = expand_full_attribution(res)
        res = collapse_whitespace(res)
        res += "।"
        data += res
    return data
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/connect.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes and functions to remove unsuccessful paths from an FST.
#ifndef FST_CONNECT_H_
#define FST_CONNECT_H_
#include <vector>
#include <fst/dfs-visit.h>
#include <fst/mutable-fst.h>
#include <fst/union-find.h>
namespace fst {
// Finds and returns (undirected) connected components. Use with Visit().
// Components are computed with a union-find over the arcs seen during the
// visit.
template <class Arc>
class CcVisitor {
 public:
  using Weight = typename Arc::Weight;
  using StateId = typename Arc::StateId;

  // cc[i]: connected component number for state i (filled in FinishVisit).
  explicit CcVisitor(std::vector<StateId> *cc)
      : comps_(new UnionFind<StateId>(0, kNoStateId)), cc_(cc), nstates_(0) {}

  // comps: caller-owned connected-component equivalence classes.
  explicit CcVisitor(UnionFind<StateId> *comps)
      : comps_(comps), cc_(nullptr), nstates_(0) {}

  // The union-find structure is owned only when this visitor allocated it
  // itself (i.e., when constructed from a cc vector).
  ~CcVisitor() {
    if (cc_) delete comps_;
  }

  void InitVisit(const Fst<Arc> &fst) {}

  bool InitState(StateId s, StateId root) {
    ++nstates_;
    if (comps_->FindSet(s) == kNoStateId) comps_->MakeSet(s);
    return true;
  }

  // DFS arc color is irrelevant for undirected connectivity: every arc
  // simply unions its endpoints. A white (unvisited) target must first be
  // registered in the union-find.
  bool WhiteArc(StateId s, const Arc &arc) {
    comps_->MakeSet(arc.nextstate);
    comps_->Union(s, arc.nextstate);
    return true;
  }

  bool GreyArc(StateId s, const Arc &arc) {
    comps_->Union(s, arc.nextstate);
    return true;
  }

  bool BlackArc(StateId s, const Arc &arc) {
    comps_->Union(s, arc.nextstate);
    return true;
  }

  void FinishState(StateId s) {}

  void FinishVisit() {
    if (cc_) GetCcVector(cc_);
  }

  // Returns number of components.
  // cc[i]: connected component number for state i.
  // Representatives are renumbered densely from 0 in state order.
  int GetCcVector(std::vector<StateId> *cc) {
    cc->clear();
    cc->resize(nstates_, kNoStateId);
    StateId ncomp = 0;
    for (StateId s = 0; s < nstates_; ++s) {
      const auto rep = comps_->FindSet(s);
      auto &comp = (*cc)[rep];
      if (comp == kNoStateId) {
        comp = ncomp;
        ++ncomp;
      }
      (*cc)[s] = comp;
    }
    return ncomp;
  }

 private:
  UnionFind<StateId> *comps_;  // Components (owned iff cc_ != nullptr).
  std::vector<StateId> *cc_;   // State's cc number (not owned).
  StateId nstates_;            // State count.
};
// Finds and returns strongly-connected components, accessible and
// coaccessible states and related properties. Uses Tarjan's single
// DFS SCC algorithm (see Aho, et al, "Design and Analysis of Computer
// Algorithms", 189pp). Use with DfsVisit();
template <class Arc>
class SccVisitor {
 public:
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  // scc[i]: strongly-connected component number for state i.
  // SCC numbers will be in topological order for acyclic input.
  // access[i]: accessibility of state i.
  // coaccess[i]: coaccessibility of state i.
  // Any of above can be NULL.
  // props: related property bits (cyclicity, initial cyclicity,
  // accessibility, coaccessibility) set/cleared (o.w. unchanged).
  SccVisitor(std::vector<StateId> *scc, std::vector<bool> *access,
             std::vector<bool> *coaccess, uint64_t *props)
      : scc_(scc), access_(access), coaccess_(coaccess), props_(props) {}
  // Property-only variant; no per-state output vectors are filled.
  explicit SccVisitor(uint64_t *props)
      : scc_(nullptr), access_(nullptr), coaccess_(nullptr), props_(props) {}

  void InitVisit(const Fst<Arc> &fst);

  bool InitState(StateId s, StateId root);

  bool TreeArc(StateId s, const Arc &arc) { return true; }

  // A back arc closes a cycle: update the Tarjan lowlink, propagate
  // coaccessibility backwards, and record the cyclicity properties.
  bool BackArc(StateId s, const Arc &arc) {
    const auto t = arc.nextstate;
    if ((*dfnumber_)[t] < (*lowlink_)[s]) (*lowlink_)[s] = (*dfnumber_)[t];
    if ((*coaccess_)[t]) (*coaccess_)[s] = true;
    *props_ |= kCyclic;
    *props_ &= ~kAcyclic;
    if (t == start_) {
      *props_ |= kInitialCyclic;
      *props_ &= ~kInitialAcyclic;
    }
    return true;
  }

  // Cross arcs only affect the lowlink when the target is still on the
  // SCC stack (i.e., in the current component under construction).
  bool ForwardOrCrossArc(StateId s, const Arc &arc) {
    const auto t = arc.nextstate;
    if ((*dfnumber_)[t] < (*dfnumber_)[s] /* cross edge */ && (*onstack_)[t] &&
        (*dfnumber_)[t] < (*lowlink_)[s]) {
      (*lowlink_)[s] = (*dfnumber_)[t];
    }
    if ((*coaccess_)[t]) (*coaccess_)[s] = true;
    return true;
  }

  // Last argument always ignored, but required by the interface.
  void FinishState(StateId state, StateId p, const Arc *);

  void FinishVisit() {
    // Numbers SCCs in topological order when acyclic.
    if (scc_) {
      for (StateId s = 0; s < scc_->size(); ++s) {
        (*scc_)[s] = nscc_ - 1 - (*scc_)[s];
      }
    }
    if (coaccess_internal_) delete coaccess_;
    dfnumber_.reset();
    lowlink_.reset();
    onstack_.reset();
    scc_stack_.reset();
  }

 private:
  std::vector<StateId> *scc_;  // State's scc number.
  std::vector<bool> *access_;  // State's accessibility.
  std::vector<bool> *coaccess_;  // State's coaccessibility.
  uint64_t *props_;
  const Fst<Arc> *fst_;
  StateId start_;
  StateId nstates_;  // State count.
  StateId nscc_;     // SCC count.
  bool coaccess_internal_;  // True if coaccess_ was allocated internally.
  std::unique_ptr<std::vector<StateId>> dfnumber_;  // State discovery times.
  std::unique_ptr<std::vector<StateId>>
      lowlink_;  // lowlink[state] == dfnumber[state] => SCC root
  std::unique_ptr<std::vector<bool>> onstack_;  // Is a state on the SCC stack?
  std::unique_ptr<std::vector<StateId>>
      scc_stack_;  // SCC stack, with random access.
};
// Resets all per-visit state and optimistically sets the acyclic /
// accessible property bits; they are cleared as counterexamples are found.
template <class Arc>
inline void SccVisitor<Arc>::InitVisit(const Fst<Arc> &fst) {
  if (scc_) scc_->clear();
  if (access_) access_->clear();
  if (coaccess_) {
    coaccess_->clear();
    coaccess_internal_ = false;
  } else {
    // Coaccessibility is always tracked internally even when the caller
    // did not ask for it, since SCC propagation needs it.
    coaccess_ = new std::vector<bool>;
    coaccess_internal_ = true;
  }
  *props_ |= kAcyclic | kInitialAcyclic | kAccessible | kCoAccessible;
  *props_ &= ~(kCyclic | kInitialCyclic | kNotAccessible | kNotCoAccessible);
  fst_ = &fst;
  start_ = fst.Start();
  nstates_ = 0;
  nscc_ = 0;
  dfnumber_.reset(new std::vector<StateId>());
  lowlink_.reset(new std::vector<StateId>());
  onstack_.reset(new std::vector<bool>());
  scc_stack_.reset(new std::vector<StateId>());
}
// Discovers state s: pushes it on the SCC stack, grows the per-state
// arrays lazily, and stamps its discovery time / initial lowlink.
template <class Arc>
inline bool SccVisitor<Arc>::InitState(StateId s, StateId root) {
  scc_stack_->push_back(s);
  // Grow all parallel arrays up to (and including) index s.
  while (dfnumber_->size() <= s) {
    if (scc_) scc_->push_back(-1);
    if (access_) access_->push_back(false);
    coaccess_->push_back(false);
    dfnumber_->push_back(-1);
    lowlink_->push_back(-1);
    onstack_->push_back(false);
  }
  (*dfnumber_)[s] = nstates_;
  (*lowlink_)[s] = nstates_;
  (*onstack_)[s] = true;
  // A state is accessible iff its DFS tree is rooted at the start state.
  if (root == start_) {
    if (access_) (*access_)[s] = true;
  } else {
    if (access_) (*access_)[s] = false;
    *props_ |= kNotAccessible;
    *props_ &= ~kAccessible;
  }
  ++nstates_;
  return true;
}
// Completes state s. If s is an SCC root (its lowlink equals its discovery
// time), pops its whole component off the stack in two passes: the first
// scans for coaccessibility, the second assigns SCC numbers and unwinds.
// Finally, propagates coaccessibility and lowlink to the DFS parent p.
template <class Arc>
inline void SccVisitor<Arc>::FinishState(StateId s, StateId p, const Arc *) {
  if (fst_->Final(s) != Weight::Zero()) (*coaccess_)[s] = true;
  if ((*dfnumber_)[s] == (*lowlink_)[s]) {  // Root of new SCC.
    bool scc_coaccess = false;
    auto i = scc_stack_->size();
    StateId t;
    // Pass 1: is any member of the component coaccessible?
    do {
      t = (*scc_stack_)[--i];
      if ((*coaccess_)[t]) scc_coaccess = true;
    } while (s != t);
    // Pass 2: pop the component, numbering and marking each member.
    do {
      t = scc_stack_->back();
      if (scc_) (*scc_)[t] = nscc_;
      if (scc_coaccess) (*coaccess_)[t] = true;
      (*onstack_)[t] = false;
      scc_stack_->pop_back();
    } while (s != t);
    if (!scc_coaccess) {
      *props_ |= kNotCoAccessible;
      *props_ &= ~kCoAccessible;
    }
    ++nscc_;
  }
  if (p != kNoStateId) {
    if ((*coaccess_)[s]) (*coaccess_)[p] = true;
    if ((*lowlink_)[s] < (*lowlink_)[p]) (*lowlink_)[p] = (*lowlink_)[s];
  }
}
// Trims an FST, removing states and arcs that are not on successful paths.
// This version modifies its input.
//
// Complexity:
//
// Time: O(V + E)
// Space: O(V + E)
//
// where V = # of states and E = # of arcs.
// Trims an FST in place: deletes every state that is not both accessible
// (reachable from the start) and coaccessible (can reach a final state).
template <class Arc>
void Connect(MutableFst<Arc> *fst) {
  using StateId = typename Arc::StateId;
  uint64_t props = 0;
  std::vector<bool> accessible;
  std::vector<bool> coaccessible;
  SccVisitor<Arc> visitor(nullptr, &accessible, &coaccessible, &props);
  DfsVisit(*fst, &visitor);
  std::vector<StateId> dead_states;
  for (StateId s = 0; s < accessible.size(); ++s) {
    const bool useful = accessible[s] && coaccessible[s];
    if (!useful) dead_states.push_back(s);
  }
  fst->DeleteStates(dead_states);
  fst->SetProperties(kAccessible | kCoAccessible, kAccessible | kCoAccessible);
}
// Returns an acyclic FST where each SCC in the input FST has been condensed to
// a single state with transitions between SCCs retained and within SCCs
// dropped. Also populates 'scc' with a mapping from input to output states.
template <class Arc>
void Condense(const Fst<Arc> &ifst, MutableFst<Arc> *ofst,
              std::vector<typename Arc::StateId> *scc) {
  using StateId = typename Arc::StateId;
  ofst->DeleteStates();
  uint64_t props = 0;
  SccVisitor<Arc> scc_visitor(scc, nullptr, nullptr, &props);
  DfsVisit(ifst, &scc_visitor);
  // Each SCC number c becomes state c of the output; states are created
  // lazily since SCC numbers may be encountered out of order.
  for (StateId s = 0; s < scc->size(); ++s) {
    const auto c = (*scc)[s];
    while (c >= ofst->NumStates()) ofst->AddState();
    if (s == ifst.Start()) ofst->SetStart(c);
    // Final weights of all members of an SCC are Plus-accumulated.
    const auto weight = ifst.Final(s);
    if (weight != Arc::Weight::Zero())
      ofst->SetFinal(c, Plus(ofst->Final(c), weight));
    for (ArcIterator<Fst<Arc>> aiter(ifst, s); !aiter.Done(); aiter.Next()) {
      auto arc = aiter.Value();
      const auto nextc = (*scc)[arc.nextstate];
      // Keep only arcs that cross between distinct SCCs.
      if (nextc != c) {
        while (nextc >= ofst->NumStates()) ofst->AddState();
        arc.nextstate = nextc;
        ofst->AddArc(c, arc);
      }
    }
  }
  // The condensation of any graph is acyclic by construction.
  ofst->SetProperties(kAcyclic | kInitialAcyclic, kAcyclic | kInitialAcyclic);
}
} // namespace fst
#endif // FST_CONNECT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/icu.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// This library implements an unrestricted Thompson/Pike UTF-8 parser and
// serializer. UTF-8 is a restricted subset of this byte stream encoding. For
// a description of the encoding details, see:
//
// http://en.wikipedia.org/wiki/UTF-8
#ifndef FST_ICU_H_
#define FST_ICU_H_
#include <sstream>
#include <vector>
#include <fst/log.h>
namespace fst {
// This function writes UTF-8 strings into a vector of Labels, truncating if
// necessary. It is possible to use this sensibly with as little as 16 bits of
// Label precision (i.e., when all characters are within the Basic Multilingual
// Plane). With 21 bits, one can label all UTF-8 labelpoints, including those
// from the various Astral Planes. Naturally, it is safe to use this with larger
// Labels (e.g., 64 bits).
template <class Label>
bool UTF8StringToLabels(const string &str, std::vector<Label> *labels) {
  for (auto it = str.begin(); it != str.end();) {
    int c = *it & 0xff;  // Treat the byte as unsigned.
    ++it;
    if ((c & 0x80) == 0) {
      // Single-byte (ASCII) character.
      labels->emplace_back(c);
    } else {
      if ((c & 0xc0) == 0x80) {
        LOG(ERROR) << "UTF8StringToLabels: Continuation byte as lead byte";
        return false;
      }
      // Number of continuation bytes, derived from the lead byte's prefix
      // (0xc0 => 1, 0xe0 => 2, 0xf0 => 3, 0xf8 => 4, 0xfc => 5); this is
      // the unrestricted (up to 6-byte) encoding, a superset of UTF-8.
      int count =
          (c >= 0xc0) + (c >= 0xe0) + (c >= 0xf0) + (c >= 0xf8) + (c >= 0xfc);
      // Payload bits of the lead byte.
      int32_t label = c & ((1 << (6 - count)) - 1);
      while (count != 0) {
        if (it == str.end()) {
          LOG(ERROR) << "UTF8StringToLabels: Truncated UTF-8 byte sequence";
          return false;
        }
        char cb = *it;
        ++it;
        if ((cb & 0xc0) != 0x80) {
          LOG(ERROR) << "UTF8StringToLabels: Missing/invalid continuation byte";
          return false;
        }
        // Shift in the 6 payload bits of each continuation byte.
        label = (label << 6) | (cb & 0x3f);
        --count;
      }
      if (label < 0) {
        // Should be unreachable.
        LOG(ERROR) << "UTF8StringToLabels: Invalid character found: " << c;
        return false;
      }
      labels->push_back(label);
    }
  }
  return true;
}
// Serializes a label sequence as a raw byte string, skipping zero
// (epsilon) labels. NOTE: each label is narrowed to a single char here,
// so labels outside the range of char are silently truncated to their low
// byte; use LabelsToUTF8String for full codepoint serialization.
template <class Label>
bool LabelsToByteString(const std::vector<Label> &labels, string *str) {
  std::ostringstream ostrm;
  for (const char label : labels) {
    if (label != 0) ostrm << label;
  }
  *str = ostrm.str();
  return !!ostrm;  // Reports stream failure (effectively always true here).
}
// Serializes a label sequence in the unrestricted (up to 6-byte, 31-bit)
// UTF-8-style encoding, skipping zero (epsilon) labels. Values above
// U+10FFFF are serialized even though strict UTF-8 (RFC 3629) forbids
// them, mirroring the unrestricted parser above.
template <class Label>
bool LabelsToUTF8String(const std::vector<Label> &labels, string *str) {
  std::ostringstream ostrm;
  for (const int32_t label : labels) {
    if (label < 0) {
      LOG(ERROR) << "LabelsToUTF8String: Invalid character found: " << label;
      return false;
    } else if (label == 0) {
      continue;  // Epsilon.
    } else if (label < 0x80) {
      // 1 byte: 7 bits.
      ostrm << static_cast<char>(label);
    } else if (label < 0x800) {
      // 2 bytes: 11 bits.
      ostrm << static_cast<char>((label >> 6) | 0xc0);
      ostrm << static_cast<char>((label & 0x3f) | 0x80);
    } else if (label < 0x10000) {
      // 3 bytes: 16 bits.
      ostrm << static_cast<char>((label >> 12) | 0xe0);
      ostrm << static_cast<char>(((label >> 6) & 0x3f) | 0x80);
      ostrm << static_cast<char>((label & 0x3f) | 0x80);
    } else if (label < 0x200000) {
      // 4 bytes: 21 bits.
      ostrm << static_cast<char>((label >> 18) | 0xf0);
      ostrm << static_cast<char>(((label >> 12) & 0x3f) | 0x80);
      ostrm << static_cast<char>(((label >> 6) & 0x3f) | 0x80);
      ostrm << static_cast<char>((label & 0x3f) | 0x80);
    } else if (label < 0x4000000) {
      // 5 bytes: 26 bits.
      ostrm << static_cast<char>((label >> 24) | 0xf8);
      ostrm << static_cast<char>(((label >> 18) & 0x3f) | 0x80);
      ostrm << static_cast<char>(((label >> 12) & 0x3f) | 0x80);
      ostrm << static_cast<char>(((label >> 6) & 0x3f) | 0x80);
      ostrm << static_cast<char>((label & 0x3f) | 0x80);
    } else {
      // 6 bytes: 31 bits.
      ostrm << static_cast<char>((label >> 30) | 0xfc);
      ostrm << static_cast<char>(((label >> 24) & 0x3f) | 0x80);
      ostrm << static_cast<char>(((label >> 18) & 0x3f) | 0x80);
      ostrm << static_cast<char>(((label >> 12) & 0x3f) | 0x80);
      ostrm << static_cast<char>(((label >> 6) & 0x3f) | 0x80);
      ostrm << static_cast<char>((label & 0x3f) | 0x80);
    }
  }
  *str = ostrm.str();
  return !!ostrm;
}
} // namespace fst
#endif // FST_ICU_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions/pdt/expand.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Expands a PDT to an FST.
#ifndef FST_EXTENSIONS_PDT_EXPAND_H_
#define FST_EXTENSIONS_PDT_EXPAND_H_
#include <forward_list>
#include <vector>
#include <fst/log.h>
#include <fst/extensions/pdt/paren.h>
#include <fst/extensions/pdt/pdt.h>
#include <fst/extensions/pdt/reverse.h>
#include <fst/extensions/pdt/shortest-path.h>
#include <fst/cache.h>
#include <fst/mutable-fst.h>
#include <fst/queue.h>
#include <fst/state-table.h>
#include <fst/test-properties.h>
namespace fst {
// Options for PdtExpandFst. The stack and state table, when supplied, are
// NOT owned by the expansion (the caller retains ownership).
template <class Arc>
struct PdtExpandFstOptions : public CacheOptions {
  bool keep_parentheses;  // Keep parentheses instead of epsilon-rewriting?
  PdtStack<typename Arc::StateId, typename Arc::Label> *stack;
  PdtStateTable<typename Arc::StateId, typename Arc::StateId> *state_table;

  explicit PdtExpandFstOptions(
      const CacheOptions &opts = CacheOptions(), bool keep_parentheses = false,
      PdtStack<typename Arc::StateId, typename Arc::Label> *stack = nullptr,
      PdtStateTable<typename Arc::StateId, typename Arc::StateId> *state_table =
          nullptr)
      : CacheOptions(opts),
        keep_parentheses(keep_parentheses),
        stack(stack),
        state_table(state_table) {}
};
namespace internal {
// Implementation class for PdtExpandFst.
// Implementation of the delayed PDT expansion: each expanded state is a
// (state, stack) tuple; arcs are computed on demand and cached.
template <class Arc>
class PdtExpandFstImpl : public CacheImpl<Arc> {
 public:
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using StackId = StateId;
  using StateTuple = PdtStateTuple<StateId, StackId>;
  using FstImpl<Arc>::SetType;
  using FstImpl<Arc>::SetProperties;
  using FstImpl<Arc>::Properties;
  using FstImpl<Arc>::SetInputSymbols;
  using FstImpl<Arc>::SetOutputSymbols;
  using CacheBaseImpl<CacheState<Arc>>::PushArc;
  using CacheBaseImpl<CacheState<Arc>>::HasArcs;
  using CacheBaseImpl<CacheState<Arc>>::HasFinal;
  using CacheBaseImpl<CacheState<Arc>>::HasStart;
  using CacheBaseImpl<CacheState<Arc>>::SetArcs;
  using CacheBaseImpl<CacheState<Arc>>::SetFinal;
  using CacheBaseImpl<CacheState<Arc>>::SetStart;

  // Stack and state table come from opts when supplied (not owned then);
  // otherwise they are allocated (and owned) here.
  PdtExpandFstImpl(const Fst<Arc> &fst,
                   const std::vector<std::pair<Label, Label>> &parens,
                   const PdtExpandFstOptions<Arc> &opts)
      : CacheImpl<Arc>(opts),
        fst_(fst.Copy()),
        stack_(opts.stack ? opts.stack : new PdtStack<StateId, Label>(parens)),
        state_table_(opts.state_table ? opts.state_table
                                      : new PdtStateTable<StateId, StackId>()),
        own_stack_(opts.stack == 0),
        own_state_table_(opts.state_table == 0),
        keep_parentheses_(opts.keep_parentheses) {
    SetType("expand");
    const auto props = fst.Properties(kFstProperties, false);
    SetProperties(PdtExpandProperties(props), kCopyProperties);
    SetInputSymbols(fst.InputSymbols());
    SetOutputSymbols(fst.OutputSymbols());
  }

  // Deep copy; always owns its freshly allocated stack and state table.
  PdtExpandFstImpl(const PdtExpandFstImpl &impl)
      : CacheImpl<Arc>(impl),
        fst_(impl.fst_->Copy(true)),
        stack_(new PdtStack<StateId, Label>(*impl.stack_)),
        state_table_(new PdtStateTable<StateId, StackId>()),
        own_stack_(true),
        own_state_table_(true),
        keep_parentheses_(impl.keep_parentheses_) {
    SetType("expand");
    SetProperties(impl.Properties(), kCopyProperties);
    SetInputSymbols(impl.InputSymbols());
    SetOutputSymbols(impl.OutputSymbols());
  }

  ~PdtExpandFstImpl() override {
    if (own_stack_) delete stack_;
    if (own_state_table_) delete state_table_;
  }

  // The expanded start state pairs the underlying start with the empty
  // stack (stack ID 0).
  StateId Start() {
    if (!HasStart()) {
      const auto s = fst_->Start();
      if (s == kNoStateId) return kNoStateId;
      StateTuple tuple(s, 0);
      const auto start = state_table_->FindState(tuple);
      SetStart(start);
    }
    return CacheImpl<Arc>::Start();
  }

  // A state is final only if the underlying state is final AND the stack
  // is empty (all parentheses balanced).
  Weight Final(StateId s) {
    if (!HasFinal(s)) {
      const auto &tuple = state_table_->Tuple(s);
      const auto weight = fst_->Final(tuple.state_id);
      if (weight != Weight::Zero() && tuple.stack_id == 0)
        SetFinal(s, weight);
      else
        SetFinal(s, Weight::Zero());
    }
    return CacheImpl<Arc>::Final(s);
  }

  size_t NumArcs(StateId s) {
    if (!HasArcs(s)) ExpandState(s);
    return CacheImpl<Arc>::NumArcs(s);
  }

  size_t NumInputEpsilons(StateId s) {
    if (!HasArcs(s)) ExpandState(s);
    return CacheImpl<Arc>::NumInputEpsilons(s);
  }

  size_t NumOutputEpsilons(StateId s) {
    if (!HasArcs(s)) ExpandState(s);
    return CacheImpl<Arc>::NumOutputEpsilons(s);
  }

  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) {
    if (!HasArcs(s)) ExpandState(s);
    CacheImpl<Arc>::InitArcIterator(s, data);
  }

  // Computes the outgoing transitions from a state, creating new destination
  // states as needed. Arcs with a non-matching close parenthesis are
  // dropped; matched parentheses are epsilon-rewritten unless
  // keep_parentheses_ is set.
  void ExpandState(StateId s) {
    StateTuple tuple = state_table_->Tuple(s);
    for (ArcIterator<Fst<Arc>> aiter(*fst_, tuple.state_id); !aiter.Done();
         aiter.Next()) {
      auto arc = aiter.Value();
      const auto stack_id = stack_->Find(tuple.stack_id, arc.ilabel);
      if (stack_id == -1) {  // Non-matching close parenthesis.
        continue;
      } else if ((stack_id != tuple.stack_id) && !keep_parentheses_) {
        // Stack push/pop.
        arc.ilabel = 0;
        arc.olabel = 0;
      }
      StateTuple ntuple(arc.nextstate, stack_id);
      arc.nextstate = state_table_->FindState(ntuple);
      PushArc(s, arc);
    }
    SetArcs(s);
  }

  const PdtStack<StackId, Label> &GetStack() const { return *stack_; }

  const PdtStateTable<StateId, StackId> &GetStateTable() const {
    return *state_table_;
  }

 private:
  // Properties for an expanded PDT: only these bits can be soundly copied
  // from the input.
  inline uint64_t PdtExpandProperties(uint64_t inprops) {
    return inprops & (kAcceptor | kAcyclic | kInitialAcyclic | kUnweighted);
  }

  std::unique_ptr<const Fst<Arc>> fst_;
  PdtStack<StackId, Label> *stack_;                // Owned iff own_stack_.
  PdtStateTable<StateId, StackId> *state_table_;   // Owned iff own_state_table_.
  bool own_stack_;
  bool own_state_table_;
  bool keep_parentheses_;
};
} // namespace internal
// Expands a pushdown transducer (PDT) encoded as an FST into an FST. This
// version is a delayed FST. In the PDT, some transitions are labeled with open
// or close parentheses. To be interpreted as a PDT, the parens must balance on
// a path. The open-close parenthesis label pairs are passed using the parens
// argument. The expansion enforces the parenthesis constraints. The PDT must be
// expandable as an FST.
//
// This class attaches interface to implementation and handles reference
// counting, delegating most methods to ImplToFst.
template <class A>
class PdtExpandFst : public ImplToFst<internal::PdtExpandFstImpl<A>> {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using StackId = StateId;
  using Store = DefaultCacheStore<Arc>;
  using State = typename Store::State;
  using Impl = internal::PdtExpandFstImpl<Arc>;
  friend class ArcIterator<PdtExpandFst<Arc>>;
  friend class StateIterator<PdtExpandFst<Arc>>;
  // Expands with default (cache) options; parens gives the open/close
  // parenthesis label pairs.
  PdtExpandFst(const Fst<Arc> &fst,
               const std::vector<std::pair<Label, Label>> &parens)
      : ImplToFst<Impl>(
            std::make_shared<Impl>(fst, parens, PdtExpandFstOptions<A>())) {}
  // As above, but with caller-supplied expansion/cache options.
  PdtExpandFst(const Fst<Arc> &fst,
               const std::vector<std::pair<Label, Label>> &parens,
               const PdtExpandFstOptions<Arc> &opts)
      : ImplToFst<Impl>(std::make_shared<Impl>(fst, parens, opts)) {}
  // See Fst<>::Copy() for doc.
  PdtExpandFst(const PdtExpandFst<Arc> &fst, bool safe = false)
      : ImplToFst<Impl>(fst, safe) {}
  // Gets a copy of this ExpandFst. See Fst<>::Copy() for further doc.
  PdtExpandFst<Arc> *Copy(bool safe = false) const override {
    return new PdtExpandFst<Arc>(*this, safe);
  }
  inline void InitStateIterator(StateIteratorData<Arc> *data) const override;
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
    GetMutableImpl()->InitArcIterator(s, data);
  }
  // Accessors for the underlying stack and state table (owned by the impl).
  const PdtStack<StackId, Label> &GetStack() const {
    return GetImpl()->GetStack();
  }
  const PdtStateTable<StateId, StackId> &GetStateTable() const {
    return GetImpl()->GetStateTable();
  }

 private:
  using ImplToFst<Impl>::GetImpl;
  using ImplToFst<Impl>::GetMutableImpl;
  void operator=(const PdtExpandFst &) = delete;
};
// Specialization for PdtExpandFst.
// State iterator specialization for PdtExpandFst; iterates over the
// lazily materialized (cached) states.
template <class Arc>
class StateIterator<PdtExpandFst<Arc>>
    : public CacheStateIterator<PdtExpandFst<Arc>> {
 public:
  explicit StateIterator(const PdtExpandFst<Arc> &fst)
      : CacheStateIterator<PdtExpandFst<Arc>>(fst, fst.GetMutableImpl()) {}
};
// Specialization for PdtExpandFst.
// Arc iterator specialization for PdtExpandFst; forces expansion of the
// state on construction so the cached arcs are available.
template <class Arc>
class ArcIterator<PdtExpandFst<Arc>>
    : public CacheArcIterator<PdtExpandFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;
  ArcIterator(const PdtExpandFst<Arc> &fst, StateId s)
      : CacheArcIterator<PdtExpandFst<Arc>>(fst.GetMutableImpl(), s) {
    if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->ExpandState(s);
  }
};
// Hooks the cache-backed state iterator into the generic Fst interface.
template <class Arc>
inline void PdtExpandFst<Arc>::InitStateIterator(
    StateIteratorData<Arc> *data) const {
  data->base = new StateIterator<PdtExpandFst<Arc>>(*this);
}
// PrunedExpand prunes the delayed expansion of a pushdown transducer (PDT)
// encoded as an FST into an FST. In the PDT, some transitions are labeled with
// open or close parentheses. To be interpreted as a PDT, the parens must
// balance on a path. The open-close parenthesis label pairs are passed
// using the parens argument. The expansion enforces the parenthesis
// constraints.
//
// The algorithm works by visiting the delayed ExpandFst using a shortest-stack
// first queue discipline and relies on the shortest-distance information
// computed using a reverse shortest-path call to perform the pruning.
//
// The algorithm maintains the same state ordering between the ExpandFst being
// visited (efst_) and the result of pruning written into the MutableFst (ofst_)
// to improve readability.
template <class Arc>
class PdtPrunedExpand {
 public:
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using StackId = StateId;
  using Stack = PdtStack<StackId, Label>;
  using StateTable = PdtStateTable<StateId, StackId>;
  using SetIterator = typename internal::PdtBalanceData<Arc>::SetIterator;
  // Constructor taking as input a PDT specified by an input FST and a vector
  // of parentheses. The keep_parentheses argument specifies whether parentheses
  // are replaced by epsilons or not during the expansion. The cache options are
  // passed to the underlying ExpandFst.
  PdtPrunedExpand(const Fst<Arc> &ifst,
                  const std::vector<std::pair<Label, Label>> &parens,
                  bool keep_parentheses = false,
                  const CacheOptions &opts = CacheOptions())
      : ifst_(ifst.Copy()),
        keep_parentheses_(keep_parentheses),
        stack_(parens),
        efst_(ifst, parens,
              PdtExpandFstOptions<Arc>(opts, true, &stack_, &state_table_)),
        queue_(state_table_, stack_, stack_length_, distance_, fdistance_),
        error_(false) {
    // Builds the reversed PDT and runs a reverse shortest-path computation;
    // its distances and balance data drive the pruning decisions below.
    Reverse(*ifst_, parens, &rfst_);
    VectorFst<Arc> path;
    reverse_shortest_path_.reset(new PdtShortestPath<Arc, FifoQueue<StateId>>(
        rfst_, parens,
        PdtShortestPathOptions<Arc, FifoQueue<StateId>>(true, false)));
    reverse_shortest_path_->ShortestPath(&path);
    error_ = (path.Properties(kError, true) == kError);
    balance_data_.reset(reverse_shortest_path_->GetBalanceData()->Reverse(
        rfst_.NumStates(), 10, -1));
    InitCloseParenMultimap(parens);
  }
  // Returns true if a failure was encountered during construction.
  bool Error() const { return error_; }
  // Expands and prunes the input PDT according to the provided weight
  // threshold, writing the result into an output mutable FST.
  void Expand(MutableFst<Arc> *ofst, const Weight &threshold);
 private:
  // Status bits for states in ofst_ (stored in flags_):
  static constexpr uint8_t kEnqueued = 0x01;     // Currently in the queue.
  static constexpr uint8_t kExpanded = 0x02;     // Already dequeued.
  static constexpr uint8_t kSourceState = 0x04;  // Destination of an open
                                                 // paren arc; see
                                                 // ProcOpenParen.
  // Comparison functor used by the queue:
  //
  // 1. States corresponding to shortest stack first, and
  // 2. for stacks of matching length, reverse lexicographic order is used, and
  // 3. for states with the same stack, shortest-first order is used.
  class StackCompare {
   public:
    StackCompare(const StateTable &state_table, const Stack &stack,
                 const std::vector<StackId> &stack_length,
                 const std::vector<Weight> &distance,
                 const std::vector<Weight> &fdistance)
        : state_table_(state_table),
          stack_(stack),
          stack_length_(stack_length),
          distance_(distance),
          fdistance_(fdistance) {}
    bool operator()(StateId s1, StateId s2) const {
      auto si1 = state_table_.Tuple(s1).stack_id;
      auto si2 = state_table_.Tuple(s2).stack_id;
      if (stack_length_[si1] < stack_length_[si2]) return true;
      if (stack_length_[si1] > stack_length_[si2]) return false;
      // If stack IDs are equal, use A*.
      if (si1 == si2) {
        return less_(Distance(s1), Distance(s2));
      }
      // If lengths are equal, uses reverse lexicographic order.
      for (; si1 != si2; si1 = stack_.Pop(si1), si2 = stack_.Pop(si2)) {
        if (stack_.Top(si1) < stack_.Top(si2)) return true;
        if (stack_.Top(si1) > stack_.Top(si2)) return false;
      }
      return false;
    }
   private:
    // A* priority: distance from the start times estimated distance to the
    // final states; Zero() when either estimate is missing.
    Weight Distance(StateId s) const {
      return (s < distance_.size()) && (s < fdistance_.size())
                 ? Times(distance_[s], fdistance_[s])
                 : Weight::Zero();
    }
    const StateTable &state_table_;
    const Stack &stack_;
    const std::vector<StackId> &stack_length_;
    const std::vector<Weight> &distance_;
    const std::vector<Weight> &fdistance_;
    const NaturalLess<Weight> less_;
  };
  class ShortestStackFirstQueue
      : public ShortestFirstQueue<StateId, StackCompare> {
   public:
    ShortestStackFirstQueue(const PdtStateTable<StateId, StackId> &state_table,
                            const Stack &stack,
                            const std::vector<StackId> &stack_length,
                            const std::vector<Weight> &distance,
                            const std::vector<Weight> &fdistance)
        : ShortestFirstQueue<StateId, StackCompare>(StackCompare(
              state_table, stack, stack_length, distance, fdistance)) {}
  };
  // Populates close_paren_multimap_ from the parenthesis pairs.
  void InitCloseParenMultimap(
      const std::vector<std::pair<Label, Label>> &parens);
  // Weight of the shortest balanced path from source to dest in ifst_.
  Weight DistanceToDest(StateId source, StateId dest) const;
  // Accessors/mutators for per-state data; mutators grow the vectors lazily.
  uint8_t Flags(StateId s) const;
  void SetFlags(StateId s, uint8_t flags, uint8_t mask);
  Weight Distance(StateId s) const;
  void SetDistance(StateId s, Weight weight);
  Weight FinalDistance(StateId s) const;
  void SetFinalDistance(StateId s, Weight weight);
  StateId SourceState(StateId s) const;
  void SetSourceState(StateId s, StateId p);
  // Visit/relax/prune primitives used by Expand's main loop.
  void AddStateAndEnqueue(StateId s);
  void Relax(StateId s, const Arc &arc, Weight weight);
  bool PruneArc(StateId s, const Arc &arc);
  void ProcStart();
  void ProcFinal(StateId s);
  bool ProcNonParen(StateId s, const Arc &arc, bool add_arc);
  bool ProcOpenParen(StateId s, const Arc &arc, StackId si, StackId nsi);
  bool ProcCloseParen(StateId s, const Arc &arc);
  void ProcDestStates(StateId s, StackId si);
  // Input PDT.
  std::unique_ptr<Fst<Arc>> ifst_;
  // Reversed PDT.
  VectorFst<Arc> rfst_;
  // Keep parentheses in ofst?
  const bool keep_parentheses_;
  // State table for efst_.
  StateTable state_table_;
  // Stack trie.
  Stack stack_;
  // Expanded PDT.
  PdtExpandFst<Arc> efst_;
  // Length of stack for given stack ID.
  std::vector<StackId> stack_length_;
  // Distance from initial state in efst_/ofst.
  std::vector<Weight> distance_;
  // Distance to final states in efst_/ofst.
  std::vector<Weight> fdistance_;
  // Queue used to visit efst_.
  ShortestStackFirstQueue queue_;
  // Construction time failure?
  bool error_;
  // Status flags for states in efst_/ofst.
  std::vector<uint8_t> flags_;
  // PDT source state for each expanded state.
  std::vector<StateId> sources_;
  // Shortest path for rfst_.
  std::unique_ptr<PdtShortestPath<Arc, FifoQueue<StateId>>>
      reverse_shortest_path_;
  std::unique_ptr<internal::PdtBalanceData<Arc>> balance_data_;
  // Maps open paren arcs to balancing close paren arcs.
  typename PdtShortestPath<Arc, FifoQueue<StateId>>::CloseParenMultimap
      close_paren_multimap_;
  MutableFst<Arc> *ofst_;  // Output FST.
  Weight limit_;           // Weight limit.
  // Maps a state s in ifst (i.e., the source of a close parenthesis matching
  // the top of current_stack_id_) to final states in efst_.
  std::unordered_map<StateId, Weight> dest_map_;
  // Stack ID of the states currently at the top of the queue, i.e., the states
  // currently being popped and processed.
  StackId current_stack_id_;
  std::ptrdiff_t current_paren_id_;  // Paren ID at top of current stack.
  std::ptrdiff_t cached_stack_id_;
  StateId cached_source_;
  // The set of pairs of destination states and weights to final states for the
  // source state cached_source_ and the stack cached_stack_id_; i.e., the
  // set of source states of a close parenthesis balancing an incoming open
  // parenthesis in state cached_source_.
  std::forward_list<std::pair<StateId, Weight>> cached_dest_list_;
  NaturalLess<Weight> less_;
};
// Builds the multimap taking each pair (state, paren ID) to all the arcs out
// of that state labeled with the matching close parenthesis.
template <class Arc>
void PdtPrunedExpand<Arc>::InitCloseParenMultimap(
    const std::vector<std::pair<Label, Label>> &parens) {
  // Maps every open or close parenthesis label to its paren ID.
  std::unordered_map<Label, Label> paren_ids;
  for (size_t i = 0; i < parens.size(); ++i) {
    paren_ids[parens[i].first] = i;
    paren_ids[parens[i].second] = i;
  }
  for (StateIterator<Fst<Arc>> siter(*ifst_); !siter.Done(); siter.Next()) {
    const auto state = siter.Value();
    for (ArcIterator<Fst<Arc>> aiter(*ifst_, state); !aiter.Done();
         aiter.Next()) {
      const auto &arc = aiter.Value();
      const auto it = paren_ids.find(arc.ilabel);
      if (it == paren_ids.end()) continue;
      // Records only close-parenthesis arcs.
      if (arc.ilabel == parens[it->second].second) {
        const internal::ParenState<Arc> key(it->second, state);
        close_paren_multimap_.emplace(key, arc);
      }
    }
  }
}
// Returns the weight of the shortest balanced path from source to dest in
// ifst_; dest must be the source state of a close paren arc.
template <class Arc>
typename Arc::Weight PdtPrunedExpand<Arc>::DistanceToDest(StateId source,
                                                          StateId dest) const {
  using SearchState =
      typename PdtShortestPath<Arc, FifoQueue<StateId>>::SearchState;
  // Search-space states are shifted by one relative to FST states.
  const SearchState search_state(source + 1, dest + 1);
  const auto weight =
      reverse_shortest_path_->GetShortestPathData().Distance(search_state);
  VLOG(2) << "D(" << source << ", " << dest << ") =" << weight;
  return weight;
}
// Returns the status flags recorded for state s in ofst_ (0 if the state has
// never been flagged).
template <class Arc>
uint8_t PdtPrunedExpand<Arc>::Flags(StateId s) const {
  if (s < flags_.size()) return flags_[s];
  return 0;
}
// Overwrites the masked bits of state s's flags with the given values,
// growing the flag vector on demand.
template <class Arc>
void PdtPrunedExpand<Arc>::SetFlags(StateId s, uint8_t flags, uint8_t mask) {
  if (flags_.size() <= s) flags_.resize(s + 1, 0);
  flags_[s] = (flags_[s] & ~mask) | (flags & mask);
}
// Returns the shortest distance from the initial state to s in ofst_, or
// Zero() if no estimate has been recorded yet.
template <class Arc>
typename Arc::Weight PdtPrunedExpand<Arc>::Distance(StateId s) const {
  if (s < distance_.size()) return distance_[s];
  return Weight::Zero();
}
// Sets the shortest distance from the initial state to s in ofst_, growing
// the distance vector on demand.
template <class Arc>
void PdtPrunedExpand<Arc>::SetDistance(StateId s, Weight weight) {
  if (distance_.size() <= s) distance_.resize(s + 1, Weight::Zero());
  distance_[s] = std::move(weight);
}
// Returns the shortest distance from s to the final states in ofst_, or
// Zero() if no estimate has been recorded yet.
template <class Arc>
typename Arc::Weight PdtPrunedExpand<Arc>::FinalDistance(StateId s) const {
  if (s < fdistance_.size()) return fdistance_[s];
  return Weight::Zero();
}
// Sets the shortest distance from s to the final states in ofst_, growing
// the vector on demand.
template <class Arc>
void PdtPrunedExpand<Arc>::SetFinalDistance(StateId s, Weight weight) {
  if (fdistance_.size() <= s) fdistance_.resize(s + 1, Weight::Zero());
  fdistance_[s] = std::move(weight);
}
// Returns the PDT source state of state s in ofst_ (kNoStateId if unset).
template <class Arc>
typename Arc::StateId PdtPrunedExpand<Arc>::SourceState(StateId s) const {
  if (s < sources_.size()) return sources_[s];
  return kNoStateId;
}
// Sets the PDT source state of state s in ofst_ to state p in ifst_, growing
// the vector on demand.
template <class Arc>
void PdtPrunedExpand<Arc>::SetSourceState(StateId s, StateId p) {
  if (sources_.size() <= s) sources_.resize(s + 1, kNoStateId);
  sources_[s] = p;
}
// Adds state s of efst_ to ofst_ and inserts it in the queue, updating its
// flags accordingly; if it is already queued, its priority is refreshed.
template <class Arc>
void PdtPrunedExpand<Arc>::AddStateAndEnqueue(StateId s) {
  const auto flags = Flags(s);
  if (flags & kEnqueued) {
    queue_.Update(s);
  } else if (!(flags & kExpanded)) {
    while (ofst_->NumStates() <= s) ofst_->AddState();
    queue_.Enqueue(s);
    SetFlags(s, kEnqueued, kEnqueued);
  }
  // TODO(allauzen): Check everything is fine when kExpanded?
}
// Relaxes arc out of state s in ofst_ as follows:
//
// 1. If the distance to s times the weight of arc is smaller than
//    the currently stored distance for arc.nextstate, updates
//    Distance(arc.nextstate) with the new estimate;
// 2. If fd is less than the currently stored distance from arc.nextstate to
//    the final states, updates it with the new estimate.
template <class Arc>
void PdtPrunedExpand<Arc>::Relax(StateId s, const Arc &arc, Weight fd) {
  // Candidate distance to arc.nextstate going through s.
  const auto nd = Times(Distance(s), arc.weight);
  if (less_(nd, Distance(arc.nextstate))) {
    SetDistance(arc.nextstate, nd);
    // The PDT source state is propagated along the best-known path.
    SetSourceState(arc.nextstate, SourceState(s));
  }
  if (less_(fd, FinalDistance(arc.nextstate))) {
    SetFinalDistance(arc.nextstate, fd);
  }
  VLOG(2) << "Relax: " << s << ", d[s] = " << Distance(s) << ", to "
          << arc.nextstate << ", d[ns] = " << Distance(arc.nextstate)
          << ", nd = " << nd;
}
// Returns whether the arc out of state s in efst_ needs to be pruned.
template <class Arc>
bool PdtPrunedExpand<Arc>::PruneArc(StateId s, const Arc &arc) {
  VLOG(2) << "Prune ?";
  auto fd = Weight::Zero();
  // Rebuilds the cached destination list only when the PDT source state or the
  // current stack has changed since the previous call.
  if ((cached_source_ != SourceState(s)) ||
      (cached_stack_id_ != current_stack_id_)) {
    cached_source_ = SourceState(s);
    cached_stack_id_ = current_stack_id_;
    cached_dest_list_.clear();
    if (cached_source_ != ifst_->Start()) {
      for (auto set_iter =
               balance_data_->Find(current_paren_id_, cached_source_);
           !set_iter.Done(); set_iter.Next()) {
        auto dest = set_iter.Element();
        const auto it = dest_map_.find(dest);
        // NOTE(review): assumes dest was recorded in dest_map_ (by
        // ProcDestStates); dereferencing a missing entry would be UB —
        // confirm the invariant.
        cached_dest_list_.push_front(*it);
      }
    } else {
      // TODO(allauzen): queue discipline should prevent this from ever
      // happening.
      // Replace by a check.
      cached_dest_list_.push_front(
          std::make_pair(rfst_.Start() - 1, Weight::One()));
    }
  }
  // fd accumulates, over all cached destinations, the weight of reaching the
  // destination from arc.nextstate's underlying PDT state times the
  // destination's distance to the final states.
  for (auto it = cached_dest_list_.begin(); it != cached_dest_list_.end();
       ++it) {
    const auto d =
        DistanceToDest(state_table_.Tuple(arc.nextstate).state_id, it->first);
    fd = Plus(fd, Times(d, it->second));
  }
  Relax(s, arc, fd);
  // Prunes when the best-known complete path through this arc exceeds limit_.
  return less_(limit_, Times(Distance(s), Times(arc.weight, fd)));
}
// Adds start state of efst_ to ofst_, enqueues it, and initializes the
// distance data structures.
template <class Arc>
void PdtPrunedExpand<Arc>::ProcStart() {
  const auto s = efst_.Start();
  AddStateAndEnqueue(s);
  ofst_->SetStart(s);
  SetSourceState(s, ifst_->Start());
  // The start state carries the empty stack: stack ID 0, length 0, no paren.
  current_stack_id_ = 0;
  current_paren_id_ = -1;
  stack_length_.push_back(0);
  // r indexes rfst_'s start back into ifst_'s numbering (rfst_ states are
  // shifted by one) — presumably the final component of accepting paths;
  // confirm against Reverse()'s state numbering.
  const auto r = rfst_.Start() - 1;
  cached_source_ = ifst_->Start();
  cached_stack_id_ = 0;
  cached_dest_list_.push_front(std::make_pair(r, Weight::One()));
  const PdtStateTuple<StateId, StackId> tuple(r, 0);
  SetFinalDistance(state_table_.FindState(tuple), Weight::One());
  SetDistance(s, Weight::One());
  // Heuristic final distance for the start state: shortest balanced path from
  // the PDT start to r.
  const auto d = DistanceToDest(ifst_->Start(), r);
  SetFinalDistance(s, d);
  VLOG(2) << d;
}
// Makes s final in ofst_ when the shortest accepting path ending in s is
// within the pruning threshold.
template <class Arc>
void PdtPrunedExpand<Arc>::ProcFinal(StateId s) {
  const auto final_weight = efst_.Final(s);
  if (final_weight == Weight::Zero()) return;
  if (!less_(limit_, Times(Distance(s), final_weight))) {
    ofst_->SetFinal(s, final_weight);
  }
}
// Returns true when an arc (or meta-arc) leaving state s in efst_ is within
// the pruning threshold; when add_arc is true, the surviving arc is also
// added to ofst_.
template <class Arc>
bool PdtPrunedExpand<Arc>::ProcNonParen(StateId s, const Arc &arc,
                                        bool add_arc) {
  VLOG(2) << "ProcNonParen: " << s << " to " << arc.nextstate << ", "
          << arc.ilabel << ":" << arc.olabel << " / " << arc.weight
          << ", add_arc = " << (add_arc ? "true" : "false");
  const bool pruned = PruneArc(s, arc);
  if (!pruned) {
    if (add_arc) ofst_->AddArc(s, arc);
    AddStateAndEnqueue(arc.nextstate);
  }
  return !pruned;
}
// Processes an open paren arc leaving state s in ofst_. When the arc is labeled
// with an open paren,
//
// 1. Considers each (shortest) balanced path starting in s by taking the arc
// and ending by a close paren balancing the open paren of as a meta-arc,
// processing and pruning each meta-arc as a non-paren arc, inserting its
// destination to the queue;
// 2. if at least one of these meta-arcs has not been pruned, adds the
// destination of arc to ofst_ as a new source state for the stack ID nsi, and
// inserts it in the queue.
template <class Arc>
bool PdtPrunedExpand<Arc>::ProcOpenParen(StateId s, const Arc &arc, StackId si,
StackId nsi) {
// Updates the stack length when needed.
while (stack_length_.size() <= nsi) stack_length_.push_back(-1);
if (stack_length_[nsi] == -1) stack_length_[nsi] = stack_length_[si] + 1;
const auto ns = arc.nextstate;
VLOG(2) << "Open paren: " << s << "(" << state_table_.Tuple(s).state_id
<< ") to " << ns << "(" << state_table_.Tuple(ns).state_id << ")";
bool proc_arc = false;
auto fd = Weight::Zero();
const auto paren_id = stack_.ParenId(arc.ilabel);
std::forward_list<StateId> sources;
for (auto set_iter =
balance_data_->Find(paren_id, state_table_.Tuple(ns).state_id);
!set_iter.Done(); set_iter.Next()) {
sources.push_front(set_iter.Element());
}
for (const auto source : sources) {
VLOG(2) << "Close paren source: " << source;
const internal::ParenState<Arc> paren_state(paren_id, source);
for (auto it = close_paren_multimap_.find(paren_state);
it != close_paren_multimap_.end() && paren_state == it->first; ++it) {
auto meta_arc = it->second;
const PdtStateTuple<StateId, StackId> tuple(meta_arc.nextstate, si);
meta_arc.nextstate = state_table_.FindState(tuple);
const auto state_id = state_table_.Tuple(ns).state_id;
const auto d = DistanceToDest(state_id, source);
VLOG(2) << state_id << ", " << source;
VLOG(2) << "Meta arc weight = " << arc.weight << " Times " << d
<< " Times " << meta_arc.weight;
meta_arc.weight = Times(arc.weight, Times(d, meta_arc.weight));
proc_arc |= ProcNonParen(s, meta_arc, false);
fd = Plus(
fd,
Times(Times(DistanceToDest(state_table_.Tuple(ns).state_id, source),
it->second.weight),
FinalDistance(meta_arc.nextstate)));
}
}
if (proc_arc) {
VLOG(2) << "Proc open paren " << s << " to " << arc.nextstate;
ofst_->AddArc(
s, keep_parentheses_ ? arc : Arc(0, 0, arc.weight, arc.nextstate));
AddStateAndEnqueue(arc.nextstate);
const auto nd = Times(Distance(s), arc.weight);
if (less_(nd, Distance(arc.nextstate))) SetDistance(arc.nextstate, nd);
// FinalDistance not necessary for source state since pruning decided using
// meta-arcs above. But this is a problem with A*, hence the following.
if (less_(fd, FinalDistance(arc.nextstate)))
SetFinalDistance(arc.nextstate, fd);
SetFlags(arc.nextstate, kSourceState, kSourceState);
}
return proc_arc;
}
// Checks that shortest path through close paren arc in efst_ is below
// threshold, and if so, adds it to ofst_.
template <class Arc>
bool PdtPrunedExpand<Arc>::ProcCloseParen(StateId s, const Arc &arc) {
const auto weight =
Times(Distance(s), Times(arc.weight, FinalDistance(arc.nextstate)));
if (less_(limit_, weight)) return false;
ofst_->AddArc(s,
keep_parentheses_ ? arc : Arc(0, 0, arc.weight, arc.nextstate));
return true;
}
// When state s in ofst_ is a source state for stack ID si, identifies all the
// corresponding possible destination states, that is, all the states in ifst_
// that have an outgoing close paren arc balancing the incoming open paren taken
// to get to s. For each such state t, computes the shortest distance from (t,
// si) to the final states in ofst_. Stores this information in dest_map_.
template <class Arc>
void PdtPrunedExpand<Arc>::ProcDestStates(StateId s, StackId si) {
if (!(Flags(s) & kSourceState)) return;
if (si != current_stack_id_) {
dest_map_.clear();
current_stack_id_ = si;
current_paren_id_ = stack_.Top(current_stack_id_);
VLOG(2) << "StackID " << si << " dequeued for first time";
}
// TODO(allauzen): clean up source state business; rename current function to
// ProcSourceState.
SetSourceState(s, state_table_.Tuple(s).state_id);
const auto paren_id = stack_.Top(si);
for (auto set_iter =
balance_data_->Find(paren_id, state_table_.Tuple(s).state_id);
!set_iter.Done(); set_iter.Next()) {
const auto dest_state = set_iter.Element();
if (dest_map_.find(dest_state) != dest_map_.end()) continue;
auto dest_weight = Weight::Zero();
internal::ParenState<Arc> paren_state(paren_id, dest_state);
for (auto it = close_paren_multimap_.find(paren_state);
it != close_paren_multimap_.end() && paren_state == it->first; ++it) {
const auto &arc = it->second;
const PdtStateTuple<StateId, StackId> tuple(arc.nextstate,
stack_.Pop(si));
dest_weight =
Plus(dest_weight,
Times(arc.weight, FinalDistance(state_table_.FindState(tuple))));
}
dest_map_[dest_state] = dest_weight;
VLOG(2) << "State " << dest_state << " is a dest state for stack ID " << si
<< " with weight " << dest_weight;
}
}
// Expands and prunes the input PDT, writing the result in ofst.
template <class Arc>
void PdtPrunedExpand<Arc>::Expand(MutableFst<Arc> *ofst,
const typename Arc::Weight &threshold) {
ofst_ = ofst;
if (error_) {
ofst_->SetProperties(kError, kError);
return;
}
ofst_->DeleteStates();
ofst_->SetInputSymbols(ifst_->InputSymbols());
ofst_->SetOutputSymbols(ifst_->OutputSymbols());
limit_ = Times(DistanceToDest(ifst_->Start(), rfst_.Start() - 1), threshold);
flags_.clear();
ProcStart();
while (!queue_.Empty()) {
const auto s = queue_.Head();
queue_.Dequeue();
SetFlags(s, kExpanded, kExpanded | kEnqueued);
VLOG(2) << s << " dequeued!";
ProcFinal(s);
StackId stack_id = state_table_.Tuple(s).stack_id;
ProcDestStates(s, stack_id);
for (ArcIterator<PdtExpandFst<Arc>> aiter(efst_, s); !aiter.Done();
aiter.Next()) {
const auto &arc = aiter.Value();
const auto nextstack_id = state_table_.Tuple(arc.nextstate).stack_id;
if (stack_id == nextstack_id) {
ProcNonParen(s, arc, true);
} else if (stack_id == stack_.Pop(nextstack_id)) {
ProcOpenParen(s, arc, stack_id, nextstack_id);
} else {
ProcCloseParen(s, arc);
}
}
VLOG(2) << "d[" << s << "] = " << Distance(s) << ", fd[" << s
<< "] = " << FinalDistance(s);
}
}
// Expand functions.
template <class Arc>
struct PdtExpandOptions {
using Weight = typename Arc::Weight;
bool connect;
bool keep_parentheses;
Weight weight_threshold;
PdtExpandOptions(bool connect = true, bool keep_parentheses = false,
Weight weight_threshold = Weight::Zero())
: connect(connect),
keep_parentheses(keep_parentheses),
weight_threshold(std::move(weight_threshold)) {}
};
// Expands a pushdown transducer (PDT) encoded as an FST into an FST. This
// version writes the expanded PDT to a mutable FST. In the PDT, some
// transitions are labeled with open or close parentheses. To be interpreted as
// a PDT, the parens must balance on a path. The open-close parenthesis label
// pairs are passed using the parens argument. Expansion enforces the
// parenthesis constraints. The PDT must be expandable as an FST.
template <class Arc>
void Expand(
const Fst<Arc> &ifst,
const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
&parens,
MutableFst<Arc> *ofst, const PdtExpandOptions<Arc> &opts) {
PdtExpandFstOptions<Arc> eopts;
eopts.gc_limit = 0;
if (opts.weight_threshold == Arc::Weight::Zero()) {
eopts.keep_parentheses = opts.keep_parentheses;
*ofst = PdtExpandFst<Arc>(ifst, parens, eopts);
} else {
PdtPrunedExpand<Arc> pruned_expand(ifst, parens, opts.keep_parentheses);
pruned_expand.Expand(ofst, opts.weight_threshold);
}
if (opts.connect) Connect(ofst);
}
// Expands a pushdown transducer (PDT) encoded as an FST into an FST. This
// version writes the expanded PDT result to a mutable FST. In the PDT, some
// transitions are labeled with open or close parentheses. To be interpreted as
// a PDT, the parens must balance on a path. The open-close parenthesis label
// pairs are passed using the parents argument. Expansion enforces the
// parenthesis constraints. The PDT must be expandable as an FST.
template <class Arc>
void Expand(const Fst<Arc> &ifst,
const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
&parens, MutableFst<Arc> *ofst, bool connect = true,
bool keep_parentheses = false) {
const PdtExpandOptions<Arc> opts(connect, keep_parentheses);
Expand(ifst, parens, ofst, opts);
}
} // namespace fst
#endif // FST_EXTENSIONS_PDT_EXPAND_H_
| 0 |
coqui_public_repos/TTS-recipes/Thorsten_DE | coqui_public_repos/TTS-recipes/Thorsten_DE/DoubleDecoderConsistency/vocoder_config.json | {
"github_branch":"* dev",
"restore_path":"/home/erogol/Models/LJSpeech/pwgan-July-13-2020_03+47PM-499ecad/checkpoint_675000.pth.tar",
"run_name": "pwgan",
"run_description": "parallel-wavegan for german",
// AUDIO PARAMETERS
"audio":{
// stft parameters
"fft_size": 1024, // number of stft frequency levels. Size of the linear spectogram frame.
"win_length": 1024, // stft window length in ms.
"hop_length": 256, // stft window hop-lengh in ms.
"frame_length_ms": null, // stft window length in ms.If null, 'win_length' is used.
"frame_shift_ms": null, // stft window hop-lengh in ms. If null, 'hop_length' is used.
// Audio processing parameters
"sample_rate": 22050, // DATASET-RELATED: wav sample-rate.
"preemphasis": 0.0, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
"ref_level_db": 20, // reference level db, theoretically 20db is the sound of air.
// Silence trimming
"do_trim_silence": true,// enable trimming of slience of audio as you load it. LJspeech (true), TWEB (false), Nancy (true)
"trim_db": 60, // threshold for timming silence. Set this according to your dataset.
"do_sound_norm": true,
// Griffin-Lim
"power": 1.5, // value to sharpen wav signals after GL algorithm.
"griffin_lim_iters": 60,// #griffin-lim iterations. 30-60 is a good range. Larger the value, slower the generation.
// MelSpectrogram parameters
"num_mels": 80, // size of the mel spec frame.
"mel_fmin": 0.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
"mel_fmax": 8000.0, // maximum freq level for mel-spec. Tune for dataset!!
"spec_gain": 20.0,
// Normalization parameters
"signal_norm": true, // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
"min_level_db": -100, // lower bound for normalization
"symmetric_norm": true, // move normalization to range [-1, 1]
"max_norm": 1.0, // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
"clip_norm": true, // clip normalized values into the range.
"stats_path": "/home/erogol/Data/thorsten-german/scale_stats.npy" // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
},
// DISTRIBUTED TRAINING
// "distributed":{
// "backend": "nccl",
// "url": "tcp:\/\/localhost:54321"
// },
// MODEL PARAMETERS
"use_pqmf": false,
// LOSS PARAMETERS
"use_stft_loss": true,
"use_subband_stft_loss": false, // USE ONLY WITH MULTIBAND MODELS
"use_mse_gan_loss": true,
"use_hinge_gan_loss": false,
"use_feat_match_loss": false, // use only with melgan discriminators
// loss weights
"stft_loss_weight": 0.5,
"subband_stft_loss_weight": 0.5,
"mse_G_loss_weight": 2.5,
"hinge_G_loss_weight": 2.5,
"feat_match_loss_weight": 25,
// multiscale stft loss parameters
"stft_loss_params": {
"n_ffts": [1024, 2048, 512],
"hop_lengths": [120, 240, 50],
"win_lengths": [600, 1200, 240]
},
// subband multiscale stft loss parameters
"subband_stft_loss_params":{
"n_ffts": [384, 683, 171],
"hop_lengths": [30, 60, 10],
"win_lengths": [150, 300, 60]
},
"target_loss": "avg_G_loss", // loss value to pick the best model to save after each epoch
// DISCRIMINATOR
"discriminator_model": "parallel_wavegan_discriminator",
"discriminator_model_params":{
"num_layers": 10
},
"steps_to_start_discriminator": 200000, // steps required to start GAN trainining.1
// GENERATOR
"generator_model": "parallel_wavegan_generator",
"generator_model_params": {
"upsample_factors":[4, 4, 4, 4],
"stacks": 3,
"num_res_blocks": 30,
"aux_context_window": 0
},
// DATASET
"data_path": "/home/erogol/Data/thorsten-german/wavs/",
"feature_path": null,
"seq_len": 25600,
"pad_short": 2000,
"conv_pad": 0,
"use_noise_augment": false,
"use_cache": true,
"reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
// TRAINING
"batch_size": 6, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
// VALIDATION
"run_eval": true,
"test_delay_epochs": 10, //Until attention is aligned, testing only wastes computation time.
"test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences.
// OPTIMIZER
"epochs": 10000, // total number of epochs to train.
"wd": 0.0, // Weight decay weight.
"gen_clip_grad": -1, // Generator gradient clipping threshold. Apply gradient clipping if > 0
"disc_clip_grad": -1, // Discriminator gradient clipping threshold.
"lr_scheduler_gen": "MultiStepLR", // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
"lr_scheduler_gen_params": {
"gamma": 0.5,
"milestones": [100000, 200000, 300000, 400000, 500000, 600000]
},
"lr_scheduler_disc": "MultiStepLR", // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
"lr_scheduler_disc_params": {
"gamma": 0.5,
"milestones": [100000, 200000, 300000, 400000, 500000, 600000]
},
"lr_gen": 1e-4, // Initial learning rate. If Noam decay is active, maximum learning rate.
"lr_disc": 1e-4,
// TENSORBOARD and LOGGING
"print_step": 25, // Number of steps to log traning on console.
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING
"num_loader_workers": 4, // number of training data loader processes. Don't set it too big. 4-8 are good values.
"num_val_loader_workers": 4, // number of evaluation data loader processes.
"eval_split_size": 10,
// PATHS
"output_path": "/home/erogol/Models/thorsten-de/"
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/script/print.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <ostream>
#include <string>
#include <fst/script/fst-class.h>
#include <fst/script/print.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
void PrintFst(const FstClass &fst, std::ostream &ostrm, const string &dest,
const SymbolTable *isyms, const SymbolTable *osyms,
const SymbolTable *ssyms, bool accept, bool show_weight_one,
const string &missing_sym) {
const auto sep = FLAGS_fst_field_separator.substr(0, 1);
FstPrinterArgs args(fst, isyms, osyms, ssyms, accept, show_weight_one, &ostrm,
dest, sep, missing_sym);
Apply<Operation<FstPrinterArgs>>("PrintFst", fst.ArcType(), &args);
}
REGISTER_FST_OPERATION(PrintFst, StdArc, FstPrinterArgs);
REGISTER_FST_OPERATION(PrintFst, LogArc, FstPrinterArgs);
REGISTER_FST_OPERATION(PrintFst, Log64Arc, FstPrinterArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/script/closure.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/closure.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
void Closure(MutableFstClass *fst, ClosureType closure_type) {
ClosureArgs args(fst, closure_type);
Apply<Operation<ClosureArgs>>("Closure", fst->ArcType(), &args);
}
REGISTER_FST_OPERATION(Closure, StdArc, ClosureArgs);
REGISTER_FST_OPERATION(Closure, LogArc, ClosureArgs);
REGISTER_FST_OPERATION(Closure, Log64Arc, ClosureArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions/pdt/reverse.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Expands a PDT to an FST.
#ifndef FST_EXTENSIONS_PDT_REVERSE_H_
#define FST_EXTENSIONS_PDT_REVERSE_H_
#include <vector>
#include <fst/mutable-fst.h>
#include <fst/relabel.h>
#include <fst/reverse.h>
namespace fst {
// Reverses a pushdown transducer (PDT) encoded as an FST.
template <class Arc, class RevArc>
void Reverse(const Fst<Arc> &ifst,
const std::vector<
std::pair<typename Arc::Label, typename Arc::Label>> &parens,
MutableFst<RevArc> *ofst) {
using Label = typename Arc::Label;
// Reverses FST component.
Reverse(ifst, ofst);
// Exchanges open and close parenthesis pairs.
std::vector<std::pair<Label, Label>> relabel_pairs;
relabel_pairs.reserve(2 * parens.size());
for (const auto &pair : parens) {
relabel_pairs.emplace_back(pair.first, pair.second);
relabel_pairs.emplace_back(pair.second, pair.first);
}
Relabel(ofst, relabel_pairs, relabel_pairs);
}
} // namespace fst
#endif // FST_EXTENSIONS_PDT_REVERSE_H_
| 0 |
coqui_public_repos/TTS/recipes/ljspeech | coqui_public_repos/TTS/recipes/ljspeech/xtts_v2/train_gpt_xtts.py | import os
from trainer import Trainer, TrainerArgs
from TTS.config.shared_configs import BaseDatasetConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig
from TTS.utils.manage import ModelManager
# Logging parameters
RUN_NAME = "GPT_XTTS_v2.0_LJSpeech_FT"
PROJECT_NAME = "XTTS_trainer"
DASHBOARD_LOGGER = "tensorboard"
LOGGER_URI = None
# Set here the path that the checkpoints will be saved. Default: ./run/training/
OUT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "run", "training")
# Training Parameters
OPTIMIZER_WD_ONLY_ON_WEIGHTS = True # for multi-gpu training please make it False
START_WITH_EVAL = True # if True it will star with evaluation
BATCH_SIZE = 3 # set here the batch size
GRAD_ACUMM_STEPS = 84 # set here the grad accumulation steps
# Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS need to be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE but then set GRAD_ACUMM_STEPS accordingly.
# Define here the dataset that you want to use for the fine-tuning on.
config_dataset = BaseDatasetConfig(
formatter="ljspeech",
dataset_name="ljspeech",
path="/raid/datasets/LJSpeech-1.1_24khz/",
meta_file_train="/raid/datasets/LJSpeech-1.1_24khz/metadata.csv",
language="en",
)
# Add here the configs of the datasets
DATASETS_CONFIG_LIST = [config_dataset]
# Define the path where XTTS v2.0.1 files will be downloaded
CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/")
os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True)
# DVAE files
DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth"
MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth"
# Set the path to the downloaded files
DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK))
MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(MEL_NORM_LINK))
# download DVAE files if needed
if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE):
print(" > Downloading DVAE files!")
ModelManager._download_model_files([MEL_NORM_LINK, DVAE_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)
# Download XTTS v2.0 checkpoint if needed
TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json"
XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth"
# XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning.
TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK)) # vocab.json file
XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CHECKPOINT_LINK)) # model.pth file
# download XTTS v2.0 files if needed
if not os.path.isfile(TOKENIZER_FILE) or not os.path.isfile(XTTS_CHECKPOINT):
print(" > Downloading XTTS v2.0 files!")
ModelManager._download_model_files(
[TOKENIZER_FILE_LINK, XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
)
# Training sentences generations
SPEAKER_REFERENCE = [
"./tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences
]
LANGUAGE = config_dataset.language
def main():
    """Configure and launch GPT XTTS v2.0 fine-tuning.

    Builds the GPT model arguments, audio settings and trainer configuration,
    loads train/eval samples from the dataset configs, and runs the Trainer.
    Relies on the module-level constants defined above (paths, checkpoints,
    batch sizes, speaker reference, etc.).
    """
    # init args and config
    model_args = GPTArgs(
        max_conditioning_length=132300,  # 6 secs
        min_conditioning_length=66150,  # 3 secs
        debug_loading_failures=False,
        max_wav_length=255995,  # ~11.6 seconds
        max_text_length=200,
        mel_norm_file=MEL_NORM_FILE,
        dvae_checkpoint=DVAE_CHECKPOINT,
        xtts_checkpoint=XTTS_CHECKPOINT,  # checkpoint path of the model that you want to fine-tune
        tokenizer_file=TOKENIZER_FILE,
        # audio token vocabulary: 1024 codes + start/stop tokens
        gpt_num_audio_tokens=1026,
        gpt_start_audio_token=1024,
        gpt_stop_audio_token=1025,
        gpt_use_masking_gt_prompt_approach=True,
        gpt_use_perceiver_resampler=True,
    )
    # define audio config; dvae operates at 22.05 kHz, final vocoder output at 24 kHz
    audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000)
    # training parameters config
    config = GPTTrainerConfig(
        output_path=OUT_PATH,
        model_args=model_args,
        run_name=RUN_NAME,
        project_name=PROJECT_NAME,
        run_description="""
            GPT XTTS training
            """,
        dashboard_logger=DASHBOARD_LOGGER,
        logger_uri=LOGGER_URI,
        audio=audio_config,
        batch_size=BATCH_SIZE,
        batch_group_size=48,
        eval_batch_size=BATCH_SIZE,
        num_loader_workers=8,
        eval_split_max_size=256,
        print_step=50,
        plot_step=100,
        log_model_step=1000,
        save_step=10000,
        save_n_checkpoints=1,
        save_checkpoints=True,
        # target_loss="loss",
        print_eval=False,
        # Optimizer values like tortoise, pytorch implementation with modifications to not apply WD to non-weight parameters.
        optimizer="AdamW",
        optimizer_wd_only_on_weights=OPTIMIZER_WD_ONLY_ON_WEIGHTS,
        optimizer_params={"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2},
        lr=5e-06,  # learning rate
        lr_scheduler="MultiStepLR",
        # it was adjusted accordingly for the new step scheme
        lr_scheduler_params={"milestones": [50000 * 18, 150000 * 18, 300000 * 18], "gamma": 0.5, "last_epoch": -1},
        # sentences synthesized during evaluation to monitor quality
        test_sentences=[
            {
                "text": "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
                "speaker_wav": SPEAKER_REFERENCE,
                "language": LANGUAGE,
            },
            {
                "text": "This cake is great. It's so delicious and moist.",
                "speaker_wav": SPEAKER_REFERENCE,
                "language": LANGUAGE,
            },
        ],
    )
    # init the model from config
    model = GPTTrainer.init_from_config(config)
    # load training samples
    train_samples, eval_samples = load_tts_samples(
        DATASETS_CONFIG_LIST,
        eval_split=True,
        eval_split_max_size=config.eval_split_max_size,
        eval_split_size=config.eval_split_size,
    )
    # init the trainer and 🚀
    trainer = Trainer(
        TrainerArgs(
            restore_path=None,  # xtts checkpoint is restored via xtts_checkpoint key so no need of restore it using Trainer restore_path parameter
            skip_train_epoch=False,
            start_with_eval=START_WITH_EVAL,
            grad_accum_steps=GRAD_ACUMM_STEPS,
        ),
        config,
        output_path=OUT_PATH,
        model=model,
        train_samples=train_samples,
        eval_samples=eval_samples,
    )
    trainer.fit()
if __name__ == "__main__":
main()
| 0 |
coqui_public_repos/TTS/tests | coqui_public_repos/TTS/tests/vocoder_tests/test_vocoder_rwd.py | import numpy as np
import torch
from TTS.vocoder.models.random_window_discriminator import RandomWindowDiscriminator
def test_rwd():
    """Smoke-test the RandomWindowDiscriminator forward pass: a batch of raw
    audio plus mel conditioning should yield 10 score tensors (5 unconditional
    + 5 conditional windows) with per-sample scalar outputs."""
    disc = RandomWindowDiscriminator(
        cond_channels=80,
        window_sizes=(512, 1024, 2048, 4096, 8192),
        cond_disc_downsample_factors=[(8, 4, 2, 2, 2), (8, 4, 2, 2), (8, 4, 2), (8, 4), (4, 2, 2)],
        hop_length=256,
    )
    waveform = torch.rand([4, 1, 22050])
    mel_cond = torch.rand([4, 80, 22050 // 256])

    scores, _ = disc(waveform, mel_cond)

    assert len(scores) == 10
    assert np.all(scores[0].shape == (4, 1, 1))
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/bin/fstdraw.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_bool(acceptor, false, "Input in acceptor format");
DEFINE_string(isymbols, "", "Input label symbol table");
DEFINE_string(osymbols, "", "Output label symbol table");
DEFINE_string(ssymbols, "", "State label symbol table");
DEFINE_bool(numeric, false, "Print numeric labels");
DEFINE_int32(precision, 5, "Set precision (number of char/float)");
DEFINE_string(float_format, "g",
"Floating-point format, one of: \"e\", \"f\", or \"g\"");
DEFINE_bool(show_weight_one, false,
"Print/draw arc weights and final weights equal to Weight::One()");
DEFINE_string(title, "", "Set figure title");
DEFINE_bool(portrait, false, "Portrait mode (def: landscape)");
DEFINE_bool(vertical, false, "Draw bottom-to-top instead of left-to-right");
DEFINE_int32(fontsize, 14, "Set fontsize");
DEFINE_double(height, 11, "Set height");
DEFINE_double(width, 8.5, "Set width");
DEFINE_double(nodesep, 0.25,
"Set minimum separation between nodes (see dot documentation)");
DEFINE_double(ranksep, 0.40,
"Set minimum separation between ranks (see dot documentation)");
DEFINE_bool(allow_negative_labels, false,
"Allow negative labels (not recommended; may cause conflicts)");
int fstdraw_main(int argc, char **argv);
int main(int argc, char **argv) { return fstdraw_main(argc, argv); }
| 0 |
coqui_public_repos/TTS/docs | coqui_public_repos/TTS/docs/source/configuration.md | # Configuration
We use 👩✈️[Coqpit] for configuration management. It provides basic static type checking and serialization capabilities on top of native Python `dataclasses`. Here is what a simple configuration looks like with Coqpit.
```python
from dataclasses import asdict, dataclass, field
from typing import List, Union
from coqpit.coqpit import MISSING, Coqpit, check_argument
@dataclass
class SimpleConfig(Coqpit):
val_a: int = 10
val_b: int = None
val_d: float = 10.21
val_c: str = "Coqpit is great!"
vol_e: bool = True
# mandatory field
# raise an error when accessing the value if it is not changed. It is a way to define
val_k: int = MISSING
# optional field
val_dict: dict = field(default_factory=lambda: {"val_aa": 10, "val_ss": "This is in a dict."})
# list of list
val_listoflist: List[List] = field(default_factory=lambda: [[1, 2], [3, 4]])
val_listofunion: List[List[Union[str, int, bool]]] = field(
default_factory=lambda: [[1, 3], [1, "Hi!"], [True, False]]
)
def check_values(
self,
): # you can define explicit constraints manually or by`check_argument()`
"""Check config fields"""
c = asdict(self) # avoid unexpected changes on `self`
check_argument("val_a", c, restricted=True, min_val=10, max_val=2056)
check_argument("val_b", c, restricted=True, min_val=128, max_val=4058, allow_none=True)
check_argument("val_c", c, restricted=True)
```
In TTS, each model must have a configuration class that exposes all the values necessary for its lifetime.
It defines model architecture, hyper-parameters, training, and inference settings. For our models, we merge all the fields in a single configuration class for ease. It may not look like a wise practice but enables easier bookkeeping and reproducible experiments.
The general configuration hierarchy looks like below:
```
ModelConfig()
|
| -> ... # model specific configurations
| -> ModelArgs() # model class arguments
| -> BaseDatasetConfig() # only for tts models
| -> BaseXModelConfig() # Generic fields for `tts` and `vocoder` models.
|
| -> BaseTrainingConfig() # trainer fields
| -> BaseAudioConfig() # audio processing fields
```
In the example above, ```ModelConfig()``` is the final configuration that the model receives and it has all the fields necessary for the model.
We host pre-defined model configurations under ```TTS/<model_class>/configs/```. Although we recommend a unified config class, you can decompose it as you like for your custom models, as long as all the fields for the trainer, model, and inference APIs are provided.
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstconvert-main.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Converts an FST to another type.
#include <cstring>
#include <memory>
#include <string>
#include <fst/flags.h>
#include <fst/script/convert.h>
DECLARE_string(fst_type);
// Converts an FST file to the representation named by --fst_type, or simply
// copies it through when it is already of that type.
int fstconvert_main(int argc, char **argv) {
  namespace s = fst::script;
  using fst::script::FstClass;

  string usage = "Converts an FST to another type.\n\n Usage: ";
  usage += argv[0];
  usage += " [in.fst [out.fst]]\n";

  std::set_new_handler(FailedNewHandler);
  SET_FLAGS(usage.c_str(), &argc, &argv, true);
  if (argc > 3) {
    ShowUsage();
    return 1;
  }

  // "-" (or no argument) means standard input; missing output means stdout.
  const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : "";
  const string out_name = argc > 2 ? argv[2] : "";

  std::unique_ptr<FstClass> ifst(FstClass::Read(in_name));
  if (!ifst) return 1;

  // Already the requested type: write it back out unchanged.
  if (ifst->FstType() == FLAGS_fst_type) return !ifst->Write(out_name);

  std::unique_ptr<FstClass> ofst(s::Convert(*ifst, FLAGS_fst_type));
  if (!ofst) return 1;
  return !ofst->Write(out_name);
}
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/win-amd64-cpu-opt.yml | build:
template_file: win-opt-base.tyml
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.win"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.win"
- "index.project.deepspeech.deepspeech.native_client.win.${event.head.sha}"
dependencies:
- "node-gyp-cache"
- "swig-win-amd64"
- "pyenv-win-amd64"
- "tf_win-amd64-cpu-opt"
tensorflow: ${system.tensorflow.win_amd64_cpu.url}
scripts:
build: "taskcluster/win-build.sh"
package: "taskcluster/win-package.sh"
nc_asset_name: "native_client.amd64.cpu.win.tar.xz"
maxRunTime: 14400
metadata:
name: "DeepSpeech Windows AMD64 CPU"
description: "Building DeepSpeech for Windows AMD64, CPU only, optimized version"
| 0 |
coqui_public_repos/STT/training/coqui_stt_training | coqui_public_repos/STT/training/coqui_stt_training/util/io.py | """
A set of I/O utils that allow us to open files on remote storage as if they were present locally and access
into HDFS storage using Tensorflow's C++ FileStream API.
Currently only includes wrappers for Google's GCS, but this can easily be expanded for AWS S3 buckets.
"""
import os
def is_remote_path(path):
    """
    Returns True iff the path is one of the remote formats that this
    module supports (currently Google Cloud Storage ``gs://`` and
    HDFS ``hdfs://``).
    """
    # str() allows path-like objects; str.startswith accepts a tuple of
    # prefixes, which keeps the supported schemes in one place.
    return str(path).startswith(("gs://", "hdfs://"))
def path_exists_remote(path):
    """
    Check whether a local or remote path (e.g. ``gs://...``) exists.
    """
    from tensorflow.io import gfile

    # Dispatch to the matching backend: gfile for remote URIs, os for local.
    checker = gfile.exists if is_remote_path(path) else os.path.exists
    return checker(path)
def copy_remote(src, dst, overwrite=False):
    """
    Allows us to copy a file from local to remote or vice versa.

    ``overwrite`` controls whether an existing destination is replaced.
    Delegates to TensorFlow's ``gfile.copy``, which understands both local
    paths and remote URIs like ``gs://...``.
    """
    from tensorflow.io import gfile

    return gfile.copy(src, dst, overwrite)
def open_remote(
    path, mode="r", buffering=-1, encoding=None, newline=None, closefd=True, opener=None
):
    """
    Open a local file or a remote one like ``gs://...``.

    Remote paths are opened through TensorFlow's ``gfile``; in that case
    ``buffering``, ``encoding``, ``newline``, ``closefd`` and ``opener``
    are ignored.

    This enables us to do:

    with open_remote('gs://.....', mode='w+') as f:
        do something with the file f, whether or not we have local access to it
    """
    from tensorflow.io import gfile

    # Local paths go through the builtin open() with the full argument set.
    if not is_remote_path(path):
        return open(
            path,
            mode,
            buffering=buffering,
            encoding=encoding,
            newline=newline,
            closefd=closefd,
            opener=opener,
        )
    return gfile.GFile(path, mode=mode)
def isdir_remote(path):
    """
    Return True if the given local or remote path is a directory.
    """
    from tensorflow.io import gfile

    # Pick the backend matching the path scheme, then apply it.
    checker = gfile.isdir if is_remote_path(path) else os.path.isdir
    return checker(path)
def listdir_remote(path):
    """
    List the entries of a local or remote directory (alternative to a glob).
    """
    from tensorflow.io import gfile

    lister = gfile.listdir if is_remote_path(path) else os.listdir
    return lister(path)
def glob_remote(filename):
    """
    Wrapper that provides globs on local and remote paths like `gs://...`.

    Returns the list of paths matching the given pattern.
    """
    from tensorflow.io import gfile

    return gfile.glob(filename)
def remove_remote(filename):
    """
    Wrapper that can remove local and remote files like `gs://...`.

    Removes a single file (not a directory); see ``rmtree_remote`` for trees.
    """
    from tensorflow.io import gfile

    return gfile.remove(filename)
def rmtree_remote(foldername):
    """
    Wrapper that can remove local and remote directories like `gs://...`.

    Deletes the directory and everything beneath it.
    """
    from tensorflow.io import gfile

    return gfile.rmtree(foldername)
| 0 |
coqui_public_repos/TTS-recipes/LJSpeech | coqui_public_repos/TTS-recipes/LJSpeech/DoubleDecoderConsistency/model_config.json | {
"model": "Tacotron2",
"run_name": "ljspeech-ddc",
"run_description": "tacotron2 with ddc",
// AUDIO PARAMETERS
"audio":{
// stft parameters
"fft_size": 1024, // number of stft frequency levels. Size of the linear spectogram frame.
"win_length": 1024, // stft window length in ms.
"hop_length": 256, // stft window hop-lengh in ms.
"frame_length_ms": null, // stft window length in ms.If null, 'win_length' is used.
"frame_shift_ms": null, // stft window hop-lengh in ms. If null, 'hop_length' is used.
// Audio processing parameters
"sample_rate": 22050, // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
"preemphasis": 0.0, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
"ref_level_db": 0, // reference level db, theoretically 20db is the sound of air.
// Silence trimming
"do_trim_silence": true,// enable trimming of slience of audio as you load it. LJspeech (false), TWEB (false), Nancy (true)
"trim_db": 60, // threshold for timming silence. Set this according to your dataset.
// Griffin-Lim
"power": 1.5, // value to sharpen wav signals after GL algorithm.
"griffin_lim_iters": 60,// #griffin-lim iterations. 30-60 is a good range. Larger the value, slower the generation.
// MelSpectrogram parameters
"num_mels": 80, // size of the mel spec frame.
"mel_fmin": 50.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
"mel_fmax": 7600.0, // maximum freq level for mel-spec. Tune for dataset!!
"spec_gain": 1.0, // scaler value appplied after log transform of spectrogram.
// Normalization parameters
"signal_norm": true, // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
"min_level_db": -100, // lower bound for normalization
"symmetric_norm": true, // move normalization to range [-1, 1]
"max_norm": 4.0, // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
"clip_norm": true, // clip normalized values into the range.
"stats_path": "./scale_stats.npy" // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
},
// VOCABULARY PARAMETERS
// if custom character set is not defined,
// default set in symbols.py is used
// "characters":{
// "pad": "_",
// "eos": "~",
// "bos": "^",
// "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;? ",
// "punctuations":"!'(),-.:;? ",
// "phonemes":"iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡqɢʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧɚ˞ɫ"
// },
// DISTRIBUTED TRAINING
"distributed":{
"backend": "nccl",
"url": "tcp:\/\/localhost:54321"
},
"reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
// TRAINING
"batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
"eval_batch_size":16,
"r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
"gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
"loss_masking": true, // enable / disable loss masking against the sequence padding.
"ga_alpha": 0, // weight for guided attention loss. If > 0, guided attention is enabled.
"apex_amp_level": null, // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP), NOTE: currently only O1 is supported, and use "O1" to activate.
// VALIDATION
"run_eval": true,
"test_delay_epochs": 10, //Until attention is aligned, testing only wastes computation time.
"test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences.
// OPTIMIZER
"noam_schedule": false, // use noam warmup and lr schedule.
"grad_clip": 1.0, // upper limit for gradients for clipping.
"epochs": 1000, // total number of epochs to train.
"lr": 0.0001, // Initial learning rate. If Noam decay is active, maximum learning rate.
"wd": 0.000001, // Weight decay weight.
"warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr"
"seq_len_norm": false, // Normalize eash sample loss with its length to alleviate imbalanced datasets. Use it if your dataset is small or has skewed distribution of sequence lengths.
// TACOTRON PRENET
"memory_size": -1, // ONLY TACOTRON - size of the memory queue used fro storing last decoder predictions for auto-regression. If < 0, memory queue is disabled and decoder only uses the last prediction frame.
"prenet_type": "original", // "original" or "bn".
"prenet_dropout": false, // enable/disable dropout at prenet.
// TACOTRON ATTENTION
"attention_type": "original", // 'original' or 'graves'
"attention_heads": 4, // number of attention heads (only for 'graves')
"attention_norm": "sigmoid", // softmax or sigmoid.
"windowing": false, // Enables attention windowing. Used only in eval mode.
"use_forward_attn": false, // if it uses forward attention. In general, it aligns faster.
"forward_attn_mask": false, // Additional masking forcing monotonicity only in eval mode.
"transition_agent": false, // enable/disable transition agent of forward attention.
"location_attn": true, // enable_disable location sensitive attention. It is enabled for TACOTRON by default.
"bidirectional_decoder": false, // use https://arxiv.org/abs/1907.09006. Use it, if attention does not work well with your dataset.
"double_decoder_consistency": true, // use DDC explained here https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency-draft/
"ddc_r": 7, // reduction rate for coarse decoder.
// STOPNET
"stopnet": true, // Train stopnet predicting the end of synthesis.
"separate_stopnet": true, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.
// TENSORBOARD and LOGGING
"print_step": 25, // Number of steps to log training on console.
"tb_plot_step": 100, // Number of steps to plot TB training figures.
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING
"text_cleaner": "phoneme_cleaners",
"enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars.
"num_loader_workers": 4, // number of training data loader processes. Don't set it too big. 4-8 are good values.
"num_val_loader_workers": 4, // number of evaluation data loader processes.
"batch_group_size": 0, //Number of batches to shuffle after bucketing.
"min_seq_len": 6, // DATASET-RELATED: minimum text length to use in training
"max_seq_len": 153, // DATASET-RELATED: maximum text length
// PATHS
"output_path": "tts_model/",
// PHONEMES
"phoneme_cache_path": "phoneme_cache/", // phoneme computation is slow, therefore, it caches results in the given folder.
"use_phonemes": true, // use phonemes instead of raw characters. It is suggested for better pronounciation.
"phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages
// MULTI-SPEAKER and GST
"use_speaker_embedding": false, // use speaker embedding to enable multi-speaker learning.
"style_wav_for_test": null, // path to style wav file to be used in TacotronGST inference.
"use_gst": false, // TACOTRON ONLY: use global style tokens
// DATASETS
"datasets": // List of datasets. They all merged and they get different speaker_ids.
[
{
"name": "ljspeech",
"path": "LJSpeech-1.1/",
"meta_file_train": "metadata.csv",
"meta_file_val": null
}
]
}
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/ci_scripts/node_tflite-tests.sh | #!/bin/bash
set -xe
source $(dirname "$0")/all-vars.sh
source $(dirname "$0")/all-utils.sh
source $(dirname "$0")/asserts.sh
samplerate=$1
ldc93s1_sample_filename="LDC93S1_pcms16le_1_${samplerate}.wav"
model_source=${STT_TEST_MODEL}
model_name=$(basename "${model_source}")
download_data
node --version
npm --version
export_node_bin_path
check_runtime_nodejs
run_all_inference_tests
run_js_streaming_inference_tests
run_hotword_tests
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/mpdt/read_write_utils.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Definition of ReadLabelTriples based on ReadLabelPairs, like that in
// nlp/fst/lib/util.h for pairs, and similarly for WriteLabelTriples.
#ifndef FST_EXTENSIONS_MPDT_READ_WRITE_UTILS_H_
#define FST_EXTENSIONS_MPDT_READ_WRITE_UTILS_H_
#include <string>
#include <utility>
#include <vector>
#include <fstream>
#include <fst/test-properties.h>
namespace fst {
// Returns true on success.
template <typename Label>
bool ReadLabelTriples(const string &filename,
                      std::vector<std::pair<Label, Label>> *pairs,
                      std::vector<Label> *assignments,
                      bool allow_negative = false) {
  // Each non-empty, non-comment line must hold three whitespace-separated
  // integer columns: (label1, label2, stack/level assignment).
  std::ifstream fstrm(filename);
  if (!fstrm) {
    LOG(ERROR) << "ReadIntTriples: Can't open file: " << filename;
    return false;
  }
  static constexpr auto kLineLen = 8096;
  char line[kLineLen];
  size_t nline = 0;  // 1-based line counter, used only for error reporting
  pairs->clear();
  while (fstrm.getline(line, kLineLen)) {
    ++nline;
    std::vector<char *> col;
    SplitString(line, "\n\t ", &col, true);
    // Empty line or comment?
    if (col.empty() || col[0][0] == '\0' || col[0][0] == '#') continue;
    if (col.size() != 3) {
      LOG(ERROR) << "ReadLabelTriples: Bad number of columns, "
                 << "file = " << filename << ", line = " << nline;
      return false;
    }
    bool err;
    // Parse each column as an integer via the fst string-to-integer helper;
    // any parse failure aborts the whole read.
    const Label i1 = StrToint64_t(col[0], filename, nline, allow_negative, &err);
    if (err) return false;
    const Label i2 = StrToint64_t(col[1], filename, nline, allow_negative, &err);
    if (err) return false;
    using Level = Label;
    const Level i3 = StrToint64_t(col[2], filename, nline, allow_negative, &err);
    if (err) return false;
    // Columns 1-2 form the label pair; column 3 is its parallel assignment.
    pairs->push_back(std::make_pair(i1, i2));
    assignments->push_back(i3);
  }
  return true;
}
// Returns true on success.
template <typename Label>
bool WriteLabelTriples(const string &filename,
                       const std::vector<std::pair<Label, Label>> &pairs,
                       const std::vector<Label> &assignments) {
  // The two vectors are parallel: assignments[i] belongs to pairs[i].
  if (pairs.size() != assignments.size()) {
    LOG(ERROR) << "WriteLabelTriples: Pairs and assignments of different sizes";
    return false;
  }
  std::ofstream fstrm(filename);
  if (!fstrm) {
    LOG(ERROR) << "WriteLabelTriples: Can't open file: " << filename;
    return false;
  }
  // One tab-separated triple per line: label1 <TAB> label2 <TAB> assignment.
  for (size_t i = 0; i < pairs.size(); ++i) {
    fstrm << pairs[i].first << "\t" << pairs[i].second << "\t" << assignments[i]
          << "\n";
  }
  if (!fstrm) {
    LOG(ERROR) << "WriteLabelTriples: Write failed: "
               << (filename.empty() ? "standard output" : filename);
    return false;
  }
  return true;
}
} // namespace fst
#endif // FST_EXTENSIONS_MPDT_READ_WRITE_UTILS_H_
| 0 |
coqui_public_repos/STT/native_client | coqui_public_repos/STT/native_client/kenlm/BUILDING | KenLM has switched to cmake
cmake .
make -j 4
But they recommend building out of tree
mkdir -p build && cd build
cmake ..
make -j 4
If you only want the query code and do not care about compression (.gz, .bz2, and .xz):
./compile_query_only.sh
Windows:
The windows directory has visual studio files. Note that you need to compile
the kenlm project before build_binary and ngram_query projects.
OSX:
Missing dependencies can be remedied with brew.
brew install cmake boost eigen
Debian/Ubuntu:
sudo apt install build-essential cmake libboost-system-dev libboost-thread-dev libboost-program-options-dev libboost-test-dev libeigen3-dev zlib1g-dev libbz2-dev liblzma-dev
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/graph/graph_viewer.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/graph/graph.h"
#include "core/framework/session_options.h"
#include <unordered_set>
namespace onnxruntime {
class Function;
struct IndexedSubGraph;
} // namespace onnxruntime
namespace onnxruntime {
// use value-based compare to make sure transformer output order is consistent
struct NodeCompare {
  // Strict weak ordering over Node pointers; defined out-of-line. Used so
  // that ordered containers of nodes iterate deterministically (see the
  // comment above about consistent transformer output order).
  bool operator()(const Node* n1, const Node* n2) const;
};
/**
@class GraphViewer
Class that provides a read-only view of the Graph.
@remarks If the underlying Graph is changed, GetNodesInTopologicalOrder and GetRootNodes may become invalid.
*/
class GraphViewer {
 public:
  /**
  Construct a GraphViewer from the provided Graph instance.
  */
  explicit GraphViewer(const Graph& graph);

  /**
  Construct a GraphViewer from the provided Graph instance, filtering to the nodes specified in the IndexedSubGraph
  */
  explicit GraphViewer(const Graph& graph, const IndexedSubGraph& filter_info);

  /** Gets the Graph name. */
  const std::string& Name() const noexcept;

  /** Gets the Graph description. */
  const std::string& Description() const noexcept;

  /** Gets the path of the owning model if any **/
  const Path& ModelPath() const noexcept { return graph_->ModelPath(); }

  /**
  Gets a tensor created from an initializer.
  @param tensor_name The tensor name
  @param[out] value Sets the pointer to the TensorProto if found, or nullptr if not.
  @returns True if found. False if not.
  */
  bool GetInitializedTensor(const std::string& tensor_name, const ONNX_NAMESPACE::TensorProto*& value) const;

  /** Returns true if an initializer value can be overridden by a graph input with the same name. */
  bool CanOverrideInitializer() const noexcept;

  /**
  Gets the Graph inputs, excluding initializers.
  @returns Collection of NodeArg pointers for the graph inputs, excluding inputs that have matching initializers.
  @remarks No nullptr values in the returned collection. The order will be the same as in the GraphProto.
           Inputs are for filter_info_ if set.
  */
  const std::vector<const NodeArg*>& GetInputs() const noexcept;

  /**
  Gets the Graph inputs, including any initializers.
  @returns Collection of NodeArg pointers for all the graph inputs.
  @remarks No nullptr values in the returned collection. The order will be the same as in the GraphProto.
           Inputs are for filter_info_ if set.
  */
  const std::vector<const NodeArg*>& GetInputsIncludingInitializers() const noexcept;

  /**
  Gets the Graph outputs.
  @returns Collection of NodeArg pointers for all the graph outputs.
  @remarks No nullptr values in the returned collection. The order will be the same as in the GraphProto.
           Outputs are for filter_info_ if set.
  */
  const std::vector<const NodeArg*>& GetOutputs() const noexcept;

  /** Gets all ValueInfo NodeArg instances in the Graph.
  @remarks NOT filtered using filter_info_.
  */
  const std::vector<const NodeArg*>& GetValueInfo() const noexcept;

  /**
  Gets the Node instance at the specified index.
  @param node_index Index to retrieve Node from.
  @remarks May return nullptr if index no longer points to a valid node due to the node being freed, or if
           node is excluded by filter_info_.
  */
  const Node* GetNode(NodeIndex node_index) const;

  /** Gets an iterator over all the valid Nodes in the Graph.
  @remarks Nodes are filtered using filter_info_ if set.
  */
  const ConstGraphNodes& Nodes() const noexcept;

  /** Gets the number of valid nodes in the Graph.
  @remarks Returns the number of nodes in filter_info_ if set.
  */
  int NumberOfNodes() const noexcept;

  /** Gets the maximum NodeIndex value used by Nodes in the Graph. */
  int MaxNodeIndex() const noexcept;

  /** Gets the NodeIndex values for the Graph nodes, sorted into topological order.
  @remarks Filtered using filter_info_ if set.
  */
  const std::vector<NodeIndex>& GetNodesInTopologicalOrder(ExecutionOrder order = ExecutionOrder::DEFAULT) const;

  /**
  Gets the NodeIndex values for the root nodes in the Graph.
  The root nodes are the topmost nodes in the Graph that receive inputs from the Graph inputs
  and no other nodes in the Graph.
  @remarks Not supported if filter_info_ is set.
  */
  const std::vector<NodeIndex>& GetRootNodes() const;

  /** Gets all tensors created from initializers. */
  const InitializedTensorSet& GetAllInitializedTensors() const noexcept;

  /**
  Gets the NodeArg instance for the given name.
  @returns A NodeArg if found, a nullptr if not.
  */
  const NodeArg* GetNodeArg(const std::string& name) const;

  /** Gets the map of operator domains to their opset versions. */
  const std::unordered_map<std::string, int>& DomainToVersionMap() const noexcept {
    return graph_->DomainToVersionMap();
  }

  /** Checks if this is a Subgraph */
  bool IsSubgraph() const;

  /** Get the internal graph*/
  const Graph& GetGraph() const { return *graph_; }

  /**
  returns true if 'name' is an initializer, and is constant and cannot be overridden at runtime.
  @param check_outer_scope If true and the 'graph_' is a subgraph, check parent graph/s for 'name'
                           if the name is not found in 'graph_'.
  */
  bool IsConstantInitializer(const std::string& name, bool check_outer_scope) const;

  /** Get the Node containing this Graph if IsSubgraph is true. Returns nullptr otherwise. */
  const Node* ParentNode() const noexcept { return graph_->ParentNode(); }

#if !defined(ORT_MINIMAL_BUILD)
  /** Get the consumer nodes of a node arg */
  std::vector<const Node*> GetConsumerNodes(const std::string& node_arg_name) const {
    return graph_->GetConsumerNodes(node_arg_name);
  }

  /** Get the producer node of a node arg */
  const Node* GetProducerNode(const std::string& node_arg_name) const {
    return graph_->GetProducerNode(node_arg_name);
  }
#endif

  /** Get the filter info that restricts the graph viewer to a subset of nodes if set.
  @returns Filter info or nullptr
  */
  const IndexedSubGraph* GetFilterInfo() const { return filter_info_; }

 private:
  ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphViewer);

  // Shared delegating constructor used by both public constructors.
  GraphViewer(const Graph& graph, const IndexedSubGraph* filter_info);

  // Non-owning pointer to the viewed Graph; the Graph must outlive the viewer.
  const Graph* graph_;
  ConstGraphNodes graph_nodes_;

  // The NodeIndex values of the graph nodes sorted in topological order.
  std::vector<NodeIndex> nodes_in_topological_order_;
#if !defined(ORT_MINIMAL_BUILD)
  // The NodeIndex values of the graph nodes sorted in topological order with priority.
  std::vector<NodeIndex> nodes_in_topological_order_with_priority_;
#endif

  // Graph root nodes.
  std::vector<NodeIndex> root_nodes_;

  // if we're limiting the view to an IndexedSubGraph we need to create a few pieces of infrastructure that would
  // usually come from the full graph
  const IndexedSubGraph* filter_info_{nullptr};
  std::unordered_set<NodeIndex> filtered_node_indices_;
  std::vector<const NodeArg*> filtered_node_inputs_;
  std::vector<const NodeArg*> filtered_node_inputs_including_initializers_;
  std::vector<const NodeArg*> filtered_node_outputs_;
  InitializedTensorSet filtered_initializers_;
};
} // namespace onnxruntime
| 0 |
coqui_public_repos/STT/native_client | coqui_public_repos/STT/native_client/python/setup.cfg | [build_ext]
include-dirs=./
build-lib=temp_build
build-temp=temp_build
[build_py]
build-lib=temp_build
[bdist_wheel]
bdist-dir=temp_build
[install_lib]
build-dir=temp_build
[metadata]
description-file = ../README.rst
| 0 |
coqui_public_repos/TTS/tests | coqui_public_repos/TTS/tests/aux_tests/test_audio_processor.py | import os
import unittest
from tests import get_tests_input_path, get_tests_output_path, get_tests_path
from TTS.config import BaseAudioConfig
from TTS.utils.audio.processor import AudioProcessor
TESTS_PATH = get_tests_path()
OUT_PATH = os.path.join(get_tests_output_path(), "audio_tests")
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
os.makedirs(OUT_PATH, exist_ok=True)
conf = BaseAudioConfig(mel_fmax=8000, pitch_fmax=640, pitch_fmin=1)
# pylint: disable=protected-access
class TestAudio(unittest.TestCase):
    """Round-trip and normalization sanity tests for ``AudioProcessor``.

    The processor is built once per test instance from the module-level
    ``conf``; individual tests toggle its normalization flags in place.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared processor instance exercised by every test method.
        self.ap = AudioProcessor(**conf)

    def _normalize_and_report(self, x, x_old):
        """Normalize ``x`` under the processor's current flags.

        Logs the active settings plus the resulting value range, and asserts
        that normalization did not mutate the input spectrogram in place.
        Returns the normalized spectrogram.
        """
        x_norm = self.ap.normalize(x)
        print(
            f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}"
        )
        assert (x_old - x).sum() == 0
        return x_norm

    def _assert_denorm_close(self, x, x_norm):
        """Assert denormalize() inverts normalize() up to numerical noise."""
        x_ = self.ap.denormalize(x_norm)
        assert (x - x_).sum() < 1e-3, (x - x_).mean()

    def test_audio_synthesis(self):
        """1. load wav
        2. set normalization parameters
        3. extract mel-spec
        4. invert to wav and save the output
        """
        print(" > Sanity check for the process wav -> mel -> wav")

        def _test(max_norm, signal_norm, symmetric_norm, clip_norm):
            # Apply one normalization configuration, run the full
            # wav -> mel -> wav round trip and save the result for
            # manual listening inspection.
            self.ap.max_norm = max_norm
            self.ap.signal_norm = signal_norm
            self.ap.symmetric_norm = symmetric_norm
            self.ap.clip_norm = clip_norm
            wav = self.ap.load_wav(WAV_FILE)
            mel = self.ap.melspectrogram(wav)
            wav_ = self.ap.inv_melspectrogram(mel)
            file_name = "/audio_test-melspec_max_norm_{}-signal_norm_{}-symmetric_{}-clip_norm_{}.wav".format(
                max_norm, signal_norm, symmetric_norm, clip_norm
            )
            print(" | > Creating wav file at : ", file_name)
            # file_name starts with "/", so plain concatenation is intended;
            # os.path.join() would treat it as an absolute path instead.
            self.ap.save_wav(wav_, OUT_PATH + file_name)

        # Exercise the same flag combinations for both max_norm values
        # used in practice (replaces ten hand-written calls).
        for max_norm in (1.0, 4.0):
            _test(max_norm, False, False, False)
            _test(max_norm, True, False, False)
            _test(max_norm, True, True, False)
            _test(max_norm, True, False, True)
            _test(max_norm, True, True, True)

    def test_normalize(self):
        """Check normalization and denormalization for range values and consistency"""
        print(" > Testing normalization and denormalization.")
        wav = self.ap.load_wav(WAV_FILE)
        wav = self.ap.sound_norm(wav)  # normalize audio to get a better normalization range below.
        # Reference spectrogram computed with normalization disabled; reused
        # by every configuration below.
        self.ap.signal_norm = False
        x = self.ap.melspectrogram(wav)
        x_old = x

        # --- max_norm=4.0, asymmetric, no clipping ---
        self.ap.signal_norm = True
        self.ap.symmetric_norm = False
        self.ap.clip_norm = False
        self.ap.max_norm = 4.0
        x_norm = self._normalize_and_report(x, x_old)
        # without clipping a small overshoot past the nominal range is tolerated
        assert x_norm.max() <= self.ap.max_norm + 1, x_norm.max()
        assert x_norm.min() >= 0 - 1, x_norm.min()
        self._assert_denorm_close(x, x_norm)

        # --- max_norm=4.0, asymmetric, clipping: range within [0, max_norm] ---
        self.ap.clip_norm = True
        x_norm = self._normalize_and_report(x, x_old)
        assert x_norm.max() <= self.ap.max_norm, x_norm.max()
        assert x_norm.min() >= 0, x_norm.min()
        self._assert_denorm_close(x, x_norm)

        # --- max_norm=4.0, symmetric, no clipping ---
        self.ap.symmetric_norm = True
        self.ap.clip_norm = False
        x_norm = self._normalize_and_report(x, x_old)
        assert x_norm.max() <= self.ap.max_norm + 1, x_norm.max()
        assert x_norm.min() >= -self.ap.max_norm - 2, x_norm.min()  # pylint: disable=invalid-unary-operand-type
        assert x_norm.min() <= 0, x_norm.min()
        self._assert_denorm_close(x, x_norm)

        # --- max_norm=4.0, symmetric, clipping: range within [-max_norm, max_norm] ---
        self.ap.clip_norm = True
        x_norm = self._normalize_and_report(x, x_old)
        assert x_norm.max() <= self.ap.max_norm, x_norm.max()
        assert x_norm.min() >= -self.ap.max_norm, x_norm.min()  # pylint: disable=invalid-unary-operand-type
        assert x_norm.min() <= 0, x_norm.min()
        self._assert_denorm_close(x, x_norm)

        # --- max_norm=1.0, asymmetric (clip_norm still True from above) ---
        self.ap.symmetric_norm = False
        self.ap.max_norm = 1.0
        x_norm = self._normalize_and_report(x, x_old)
        assert x_norm.max() <= self.ap.max_norm, x_norm.max()
        assert x_norm.min() >= 0, x_norm.min()
        self._assert_denorm_close(x, x_norm)

        # --- max_norm=1.0, symmetric ---
        self.ap.symmetric_norm = True
        x_norm = self._normalize_and_report(x, x_old)
        assert x_norm.max() <= self.ap.max_norm, x_norm.max()
        assert x_norm.min() >= -self.ap.max_norm, x_norm.min()  # pylint: disable=invalid-unary-operand-type
        assert x_norm.min() < 0, x_norm.min()
        self._assert_denorm_close(x, x_norm)

    def test_scaler(self):
        """Forward/backward transforms must agree when using precomputed stats."""
        scaler_stats_path = os.path.join(get_tests_input_path(), "scale_stats.npy")
        # NOTE(review): these assignments mutate the module-level `conf`,
        # which also affects test_compute_f0 — confirm that is intended.
        conf.stats_path = scaler_stats_path
        conf.preemphasis = 0.0
        conf.do_trim_silence = True
        conf.signal_norm = True

        ap = AudioProcessor(**conf)
        mel_mean, mel_std, linear_mean, linear_std, _ = ap.load_stats(scaler_stats_path)
        ap.setup_scaler(mel_mean, mel_std, linear_mean, linear_std)

        # Reference spectrogram computed without normalization/preemphasis.
        self.ap.signal_norm = False
        self.ap.preemphasis = 0.0

        # test scaler forward and backward transforms
        wav = self.ap.load_wav(WAV_FILE)
        mel_reference = self.ap.melspectrogram(wav)
        mel_norm = ap.melspectrogram(wav)
        mel_denorm = ap.denormalize(mel_norm)
        assert abs(mel_reference - mel_denorm).max() < 1e-4

    def test_compute_f0(self):  # pylint: disable=no-self-use
        """Pitch contour must be frame-aligned with the mel spectrogram."""
        ap = AudioProcessor(**conf)
        wav = ap.load_wav(WAV_FILE)
        pitch = ap.compute_f0(wav)
        mel = ap.melspectrogram(wav)
        assert pitch.shape[0] == mel.shape[1]
| 0 |
coqui_public_repos/STT-models/basque/itml | coqui_public_repos/STT-models/basque/itml/v0.1.1/LICENSE | GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm/lm | coqui_public_repos/inference-engine/third_party/kenlm/lm/interpolate/merge_probabilities.cc | #include "lm/interpolate/merge_probabilities.hh"
#include "lm/common/ngram_stream.hh"
#include "lm/interpolate/bounded_sequence_encoding.hh"
#include "lm/interpolate/interpolate_info.hh"
#include <algorithm>
#include <limits>
#include <numeric>
namespace lm {
namespace interpolate {
/**
 * Helper to generate the BoundedSequenceEncoding used for writing the
 * from values.
 */
BoundedSequenceEncoding MakeEncoder(const InterpolateInfo &info, uint8_t order) {
  const std::size_t num_models = info.orders.size();
  // Each model's "from" value is bounded by the smaller of the requested
  // order and that model's own maximum order.
  util::FixedArray<uint8_t> bounds(num_models);
  for (std::size_t model = 0; model < num_models; ++model) {
    bounds.push_back(std::min(order, info.orders[model]));
  }
  return BoundedSequenceEncoding(bounds.begin(), bounds.end());
}
namespace {
/**
 * A simple wrapper class that holds information needed to read and write
 * the ngrams of a particular order. This class has the memory needed to
 * buffer the data needed for the recursive process of computing the
 * probabilities and "from" values for each component model.
 *
 * "From" values indicate, for each model, what order (as an index, so -1)
 * was backed off to in order to arrive at a probability. For example, if a
 * 5-gram model (order index 4) backed off twice, we would write a 2.
 */
class NGramHandler {
  public:
    /**
     * @param order The (1-based) ngram order this handler is responsible for.
     * @param ifo Interpolation weights (lambdas) and per-model max orders.
     * @param models_by_order For each model, its chain positions indexed by
     *        order; models whose max order is below `order` contribute no
     *        input stream here.
     */
    NGramHandler(uint8_t order, const InterpolateInfo &ifo,
                 util::FixedArray<util::stream::ChainPositions> &models_by_order)
        : info(ifo),
          encoder(MakeEncoder(info, order)),
          out_record(order, encoder.EncodedLength()) {
      // Count how many models actually have ngrams of this order so the
      // input array can be sized exactly.
      std::size_t count_has_order = 0;
      for (std::size_t i = 0; i < models_by_order.size(); ++i) {
        count_has_order += (models_by_order[i].size() >= order);
      }
      inputs_.Init(count_has_order);
      for (std::size_t i = 0; i < models_by_order.size(); ++i) {
        if (models_by_order[i].size() < order)
          continue;
        inputs_.push_back(models_by_order[i][order - 1]);
        // Only streams that start out non-empty become "active"; each active
        // entry remembers which model index it belongs to.
        if (inputs_.back()) {
          active_.resize(active_.size() + 1);
          active_.back().model = i;
          active_.back().stream = &inputs_.back();
        }
      }

      // have to init outside since NGramStreams doesn't forward to
      // GenericStreams ctor given a ChainPositions
      probs.Init(info.Models());
      from.Init(info.Models());
      for (std::size_t i = 0; i < info.Models(); ++i) {
        probs.push_back(0.0);
        from.push_back(0);
      }
    }

    // Pairs an input stream with the index of the model it came from.
    struct StreamIndex {
      NGramStream<ProbBackoff> *stream;
      NGramStream<ProbBackoff> &Stream() { return *stream; }
      std::size_t model;
    };

    // Number of input streams that still have ngrams left to consume.
    std::size_t ActiveSize() const {
      return active_.size();
    }

    /**
     * @return the input stream for a particular model that corresponds to
     * this ngram order
     */
    StreamIndex &operator[](std::size_t idx) {
      return active_[idx];
    }

    // Drops an exhausted stream from the active set.
    void erase(std::size_t idx) {
      active_.erase(active_.begin() + idx);
    }

    const InterpolateInfo &info;
    // Encodes the per-model "from" values into the output record.
    BoundedSequenceEncoding encoder;
    // Reusable output record: ngram words, probability, and encoded
    // "from" values for the ngram currently being emitted.
    PartialProbGamma out_record;
    // Scratch, indexed by model: lambda-weighted probability contribution
    // for the current ngram.
    util::FixedArray<float> probs;
    // Scratch, indexed by model: order index the model backed off to.
    util::FixedArray<uint8_t> from;

  private:
    std::vector<StreamIndex> active_;
    NGramStreams<ProbBackoff> inputs_;
};
/**
 * A collection of NGramHandlers, one per ngram order: index i holds the
 * handler for order i + 1.
 */
class NGramHandlers : public util::FixedArray<NGramHandler> {
  public:
    explicit NGramHandlers(std::size_t num)
        : util::FixedArray<NGramHandler>(num) {
    }

    // Constructs a handler for the given order directly in the storage at
    // end() (placement new), then tells the base array it was constructed.
    void push_back(
        std::size_t order, const InterpolateInfo &info,
        util::FixedArray<util::stream::ChainPositions> &models_by_order) {
      new (end()) NGramHandler(order, info, models_by_order);
      Constructed();
    }
};
/**
 * The recursive helper function that computes probability and "from"
 * values for all ngrams matching a particular suffix.
 *
 * The current order can be computed as the suffix length + 1. Note that
 * the suffix could be empty (suffix_begin == suffix_end == NULL), in which
 * case we are handling unigrams with the UNK token as the fallback
 * probability.
 *
 * @param handlers The full collection of handlers
 * @param suffix_begin A start iterator for the suffix
 * @param suffix_end An end iterator for the suffix
 * @param fallback_probs The probabilities of this ngram if we need to
 *  back off (that is, the probability of the suffix)
 * @param fallback_from The order that the corresponding fallback
 *  probability in the fallback_probs is from
 * @param combined_fallback interpolated fallback_probs
 * @param outputs The output streams, one for each order
 */
void HandleSuffix(NGramHandlers &handlers, WordIndex *suffix_begin,
                  WordIndex *suffix_end,
                  const util::FixedArray<float> &fallback_probs,
                  const util::FixedArray<uint8_t> &fallback_from,
                  float combined_fallback,
                  util::stream::Streams &outputs) {
  uint8_t order = std::distance(suffix_begin, suffix_end) + 1;
  // Recursion bottoms out once we exceed the highest order being produced.
  if (order > outputs.size()) return;
  util::stream::Stream &output = outputs[order - 1];
  NGramHandler &handler = handlers[order - 1];

  while (true) {
    // find the next smallest ngram which matches our suffix
    // TODO: priority queue driven.
    WordIndex *minimum = NULL;
    for (std::size_t i = 0; i < handler.ActiveSize(); ++i) {
      // Streams whose context (words after the first) doesn't match the
      // suffix are ignored on this pass.
      if (!std::equal(suffix_begin, suffix_end, handler[i].Stream()->begin() + 1))
        continue;

      // if we either haven't set a minimum yet or this one is smaller than
      // the minimum we found before, replace it
      WordIndex *last = handler[i].Stream()->begin();
      if (!minimum || *last < *minimum) { minimum = handler[i].Stream()->begin(); }
    }

    // no more ngrams of this order match our suffix, so we're done
    if (!minimum) return;

    // Write the winning ngram's words into the output record.
    handler.out_record.ReBase(output.Get());
    std::copy(minimum, minimum + order, handler.out_record.begin());

    // Default case is having backed off.
    std::copy(fallback_probs.begin(), fallback_probs.end(), handler.probs.begin());
    std::copy(fallback_from.begin(), fallback_from.end(), handler.from.begin());

    // Every stream whose current ngram equals the winner contributes its
    // own (lambda-weighted) probability instead of the fallback, and is
    // advanced; exhausted streams are removed from the active set.
    for (std::size_t i = 0; i < handler.ActiveSize();) {
      if (std::equal(handler.out_record.begin(), handler.out_record.end(),
                     handler[i].Stream()->begin())) {
        handler.probs[handler[i].model] = handler.info.lambdas[handler[i].model] * handler[i].Stream()->Value().prob;
        handler.from[handler[i].model] = order - 1;
        if (++handler[i].Stream()) {
          ++i;
        } else {
          handler.erase(i);
        }
      } else {
        ++i;
      }
    }
    // Interpolated probability is the sum of per-model contributions.
    handler.out_record.Prob() = std::accumulate(handler.probs.begin(), handler.probs.end(), 0.0);
    handler.out_record.LowerProb() = combined_fallback;
    handler.encoder.Encode(handler.from.begin(),
                           handler.out_record.FromBegin());

    // we've handled this particular ngram, so now recurse to the higher
    // order using the current ngram as the suffix
    HandleSuffix(handlers, handler.out_record.begin(), handler.out_record.end(),
                 handler.probs, handler.from, handler.out_record.Prob(), outputs);
    // consume the output
    ++output;
  }
}
/**
 * Kicks off the recursion for computing the probabilities and "from"
 * values for each ngram order. We begin by handling the UNK token that
 * should be at the front of each of the unigram input streams. This is
 * then output to the stream and it is used as the fallback for handling
 * our unigram case, the unigram used as the fallback for the bigram case,
 * etc.
 */
void HandleNGrams(NGramHandlers &handlers, util::stream::Streams &outputs) {
  PartialProbGamma unk_record(1, 0);
  // First: populate the unk probabilities by reading the first unigram
  // from each stream
  util::FixedArray<float> unk_probs(handlers[0].info.Models());

  // start by populating the ngram id from the first stream
  lm::NGram<ProbBackoff> ngram = *handlers[0][0].Stream();
  unk_record.ReBase(outputs[0].Get());
  std::copy(ngram.begin(), ngram.end(), unk_record.begin());
  unk_record.Prob() = 0;

  // then populate the probabilities into unk_probs while "multiply" the
  // model probabilities together into the unk record
  //
  // note that from doesn't need to be set for unigrams
  assert(handlers[0].ActiveSize() == handlers[0].info.Models());
  for (std::size_t i = 0; i < handlers[0].info.Models();) {
    ngram = *handlers[0][i].Stream();
    unk_probs.push_back(handlers[0].info.lambdas[i] * ngram.Value().prob);
    unk_record.Prob() += unk_probs[i];
    // Every unigram stream is expected to start with the UNK token.
    assert(*ngram.begin() == kUNK);
    // Advance past UNK; a stream that is now empty leaves the active set.
    if (++handlers[0][i].Stream()) {
      ++i;
    } else {
      handlers[0].erase(i);
    }
  }
  float unk_combined = unk_record.Prob();
  unk_record.LowerProb() = unk_combined;
  // flush the unk output record
  ++outputs[0];

  // Then, begin outputting everything in lexicographic order: first we'll
  // get the unigram then the first bigram with that context, then the
  // first trigram with that bigram context, etc., until we exhaust all of
  // the ngrams, then all of the (n-1)grams, etc.
  //
  // This function is the "root" of this recursive process.
  util::FixedArray<uint8_t> unk_from(handlers[0].info.Models());
  for (std::size_t i = 0; i < handlers[0].info.Models(); ++i) {
    unk_from.push_back(0);
  }

  // the two nulls are to encode that our "fallback" word is the "0-gram"
  // case, e.g. we "backed off" to UNK
  // TODO: stop generating vocab ids and LowerProb for unigrams.
  HandleSuffix(handlers, NULL, NULL, unk_probs, unk_from, unk_combined, outputs);

  // Verify we reached the end. And poison!
  for (std::size_t i = 0; i < handlers.size(); ++i) {
    UTIL_THROW_IF2(handlers[i].ActiveSize(),
                   "MergeProbabilities did not exhaust all ngram streams");
    outputs[i].Poison();
  }
}
} // namespace
// Builds one NGramHandler per output order (1..N) and runs the merge,
// writing interpolated probabilities and "from" values to the outputs.
void MergeProbabilities::Run(const util::stream::ChainPositions &output_pos) {
  const std::size_t max_order = output_pos.size();
  NGramHandlers handlers(max_order);
  for (std::size_t order = 1; order <= max_order; ++order) {
    handlers.push_back(order, info_, models_by_order_);
  }

  util::stream::Streams outputs(output_pos);
  HandleNGrams(handlers, outputs);
}
}} // namespaces
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-linux-opt-base.tyml | $if: '(event.event != "push") && (event.event != "tag")'
then:
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.docker.provisionerId}
workerType: ${build.workerType}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
dependencies:
$map: { $eval: build.dependencies }
each(b):
$eval: as_slugid(b)
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '7 days' }
payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: ${build.docker_image}
env:
$let:
training: { $eval: as_slugid(build.test_model_task) }
linux_amd64_build: { $eval: as_slugid("linux-amd64-cpu-opt") }
linux_amd64_tflite: { $eval: as_slugid("linux-amd64-tflite-opt") }
linux_amd64_ctc: { $eval: as_slugid("linux-amd64-ctc-opt") }
in:
DEEPSPEECH_TEST_MODEL: https://community-tc.services.mozilla.com/api/queue/v1/task/${training}/artifacts/public/output_graph.pb
DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pb
DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pbmm
DECODER_ARTIFACTS_ROOT: https://community-tc.services.mozilla.com/api/queue/v1/task/${linux_amd64_ctc}/artifacts/public
PIP_DEFAULT_TIMEOUT: "60"
EXPECTED_TENSORFLOW_VERSION: "${build.tensorflow_git_desc}"
DEBIAN_FRONTEND: "noninteractive"
command:
- "/bin/bash"
- "--login"
- "-cxe"
- $let:
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
in: >
apt-get -qq update && apt-get -qq -y install curl python-simplejson git pixz sox sudo wget && ${extraSystemSetup} &&
adduser --system --home ${system.homedir.linux} ${system.username} &&
cd ${system.homedir.linux} &&
echo -e "#!/bin/bash\nset -xe\n env && id && mkdir ~/DeepSpeech/ && git clone --quiet ${event.head.repo.url} ~/DeepSpeech/ds/ && cd ~/DeepSpeech/ds && git checkout --quiet ${event.head.sha}&& mkdir -p ${system.homedir.linux}/pyenv-root/ && wget -O - ${system.pyenv.linux.url} | tar -C ${system.homedir.linux}/pyenv-root/ -xzf -" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh &&
sudo -H -u ${system.username} --preserve-env /bin/bash ${build.args.tests_cmdline}
artifacts:
"public":
type: "directory"
path: "/tmp/artifacts/"
expires: { $fromNow: '7 days' }
metadata:
name: ${build.metadata.name}
description: ${build.metadata.description}
owner: ${event.head.user.email}
source: ${event.head.repo.url}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/Makefile.in | # Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = src/extensions
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
SOURCES =
DIST_SOURCES =
RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
ctags-recursive dvi-recursive html-recursive info-recursive \
install-data-recursive install-dvi-recursive \
install-exec-recursive install-html-recursive \
install-info-recursive install-pdf-recursive \
install-ps-recursive install-recursive installcheck-recursive \
installdirs-recursive pdf-recursive ps-recursive \
tags-recursive uninstall-recursive
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
am__recursive_targets = \
$(RECURSIVE_TARGETS) \
$(RECURSIVE_CLEAN_TARGETS) \
$(am__extra_recursive_targets)
AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
distdir
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = compact compress const far linear lookahead pdt mpdt \
ngram python special
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
am__relativize = \
dir0=`pwd`; \
sed_first='s,^\([^/]*\)/.*$$,\1,'; \
sed_rest='s,^[^/]*/*,,'; \
sed_last='s,^.*/\([^/]*\)$$,\1,'; \
sed_butlast='s,/*[^/]*$$,,'; \
while test -n "$$dir1"; do \
first=`echo "$$dir1" | sed -e "$$sed_first"`; \
if test "$$first" != "."; then \
if test "$$first" = ".."; then \
dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
else \
first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
if test "$$first2" = "$$first"; then \
dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
else \
dir2="../$$dir2"; \
fi; \
dir0="$$dir0"/"$$first"; \
fi; \
fi; \
dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
done; \
reldir="$$dir2"
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
@HAVE_COMPACT_TRUE@compactdir = compact
@HAVE_COMPRESS_TRUE@compressdir = compress
@HAVE_CONST_TRUE@constdir = const
@HAVE_FAR_TRUE@fardir = far
@HAVE_GRM_TRUE@fardir = far
@HAVE_PYTHON_TRUE@fardir = far
@HAVE_GRM_TRUE@pdtdir = pdt
@HAVE_MPDT_TRUE@pdtdir = pdt
@HAVE_PDT_TRUE@pdtdir = pdt
@HAVE_GRM_TRUE@mpdtdir = mpdt
@HAVE_MPDT_TRUE@mpdtdir = mpdt
@HAVE_LINEAR_TRUE@lineardir = linear
@HAVE_LOOKAHEAD_TRUE@lookaheaddir = lookahead
@HAVE_NGRAM_TRUE@ngramdir = ngram
@HAVE_PYTHON_TRUE@pywrapfstdir = python
@HAVE_SPECIAL_TRUE@specialdir = special
SUBDIRS = $(compactdir) $(compressdir) $(constdir) $(fardir) $(lineardir) \
$(lookaheaddir) $(pdtdir) $(mpdtdir) $(ngramdir) $(pywrapfstdir) \
$(specialdir)
all: all-recursive
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/extensions/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
# This directory's subdirectories are mostly independent; you can cd
# into them and run 'make' without going through this Makefile.
# To change the values of 'make' variables: instead of editing Makefiles,
# (1) if the variable is set in 'config.status', edit 'config.status'
# (which will cause the Makefiles to be regenerated when you run 'make');
# (2) otherwise, pass the desired values on the 'make' command line.
$(am__recursive_targets):
@fail=; \
if $(am__make_keepgoing); then \
failcom='fail=yes'; \
else \
failcom='exit 1'; \
fi; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
case "$@" in \
distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
*) list='$(SUBDIRS)' ;; \
esac; \
for subdir in $$list; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
dot_seen=yes; \
local_target="$$target-am"; \
else \
local_target="$$target"; \
fi; \
($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| eval $$failcom; \
done; \
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-recursive
TAGS: tags
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
include_option=--etags-include; \
empty_fix=.; \
else \
include_option=--include; \
empty_fix=; \
fi; \
list='$(SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test ! -f $$subdir/TAGS || \
set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-recursive
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-recursive
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
$(am__make_dryrun) \
|| test -d "$(distdir)/$$subdir" \
|| $(MKDIR_P) "$(distdir)/$$subdir" \
|| exit 1; \
dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
$(am__relativize); \
new_distdir=$$reldir; \
dir1=$$subdir; dir2="$(top_distdir)"; \
$(am__relativize); \
new_top_distdir=$$reldir; \
echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
($(am__cd) $$subdir && \
$(MAKE) $(AM_MAKEFLAGS) \
top_distdir="$$new_top_distdir" \
distdir="$$new_distdir" \
am__remove_distdir=: \
am__skip_length_check=: \
am__skip_mode_fix=: \
distdir) \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-recursive
all-am: Makefile
installdirs: installdirs-recursive
installdirs-am:
install: install-recursive
install-exec: install-exec-recursive
install-data: install-data-recursive
uninstall: uninstall-recursive
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-recursive
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-recursive
clean-am: clean-generic clean-libtool mostlyclean-am
distclean: distclean-recursive
-rm -f Makefile
distclean-am: clean-am distclean-generic distclean-tags
dvi: dvi-recursive
dvi-am:
html: html-recursive
html-am:
info: info-recursive
info-am:
install-data-am:
install-dvi: install-dvi-recursive
install-dvi-am:
install-exec-am:
install-html: install-html-recursive
install-html-am:
install-info: install-info-recursive
install-info-am:
install-man:
install-pdf: install-pdf-recursive
install-pdf-am:
install-ps: install-ps-recursive
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-recursive
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-recursive
mostlyclean-am: mostlyclean-generic mostlyclean-libtool
pdf: pdf-recursive
pdf-am:
ps: ps-recursive
ps-am:
uninstall-am:
.MAKE: $(am__recursive_targets) install-am install-strip
.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
check-am clean clean-generic clean-libtool cscopelist-am ctags \
ctags-am distclean distclean-generic distclean-libtool \
distclean-tags distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-dvi \
install-dvi-am install-exec install-exec-am install-html \
install-html-am install-info install-info-am install-man \
install-pdf install-pdf-am install-ps install-ps-am \
install-strip installcheck installcheck-am installdirs \
installdirs-am maintainer-clean maintainer-clean-generic \
mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
ps ps-am tags tags-am uninstall uninstall-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| 0 |
coqui_public_repos/STT-models/czech/comodoro | coqui_public_repos/STT-models/czech/comodoro/v0.3.0/MODEL_CARD.md | # Model card for Czech STT
Jump to section:
- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)
## Model details
- Person or organization developing model: Trained by [Vojtěch Drábek](https://www.draabek.cz).
- Model language: Czech / čeština / `cs`
- Model date: May 31, 2022
- Model type: `Speech-to-Text`
- Model version: `v0.3.0`
- Compatible with 🐸 STT version: `v0.9.3`
- License: CC-BY-NC 4.0
- Citation details: `@misc{czech-stt, author = {Drábek, Vojtěch}, title = {Czech STT 0.3}, publisher = {comodoro}, journal = {deepspeech-cs}, howpublished = {\url{https://github.com/comodoro/deepspeech-cs}} }`
- Where to send questions or comments about the model: You can leave an issue on [the model release page](https://github.com/comodoro/deepspeech-cs) or [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/) or Matrix channel coqui-ai/STT.
## Intended use
Speech-to-Text for the [Czech Language](https://en.wikipedia.org/wiki/Czech_language) on 16kHz, mono-channel audio.
## Performance Factors
Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
## Metrics
STT models are usually evaluated in terms of their transcription accuracy, deployment, Real-Time Factor, and model size on disk.
#### Transcription Accuracy
More information reported on [Github](https://github.com/comodoro/deepspeech-cs/).
|Test Corpus|WER|CER|
|-----------|---|---|
|Acoustic model|
|Czech Common voice 6.1|40.6%|10.7%|
|Vystadial 2016|50.6%|19.6%|
|Parliament Plenary Hearings|21.3%|5.3%|
|ParCzech 3.0|21%|6.2%|
||
|With the attached scorer|
|Czech Common voice 6.1|15.3%|6.8%|
|Vystadial 2016|35.7%|20.1%|
|Parliament Plenary Hearings|9.7%|3.7%|
|ParCzech 3.0|10.1%|4.5%|
#### Real-Time Factor
Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.
Recorded average RTF on laptop CPU: `0.73`
#### Model Size
`model.pbmm`: 181M
`model.tflite`: 46M
`scorer`: 461M
### Approaches to uncertainty and variability
Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.
## Training data
This model was trained on the following corpora:
1. Vystadial 2016 – Czech data
2. OVM – Otázky Václava Moravce
3. Czech Parliament Meetings
4. Large Corpus of Czech Parliament Plenary Hearings
5. Common Voice Czech
6. Some private recordings and parts of audiobooks
## Evaluation data
The model was evaluated on Common Voice Czech, Large Corpus of Czech Parliament Plenary Hearings, Vystadial 2016 – Czech data and ParCzech 3.0 test sets.
## Ethical considerations
Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.
### Demographic Bias
You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.
### Surveillance
Speech-to-Text may be mis-used to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.
## Caveats and recommendations
Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data. | 0 |
coqui_public_repos/inference-engine/third_party | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/NEWS | OpenFst: Release 1.6
* Optimized label lookup in SymbolTable (1.6.9)
* Fixed PROGRAM_FLAGS documentation string in binaries (1.6.8)
* Fixed handling of symbol tables in EpsNormalize (1.6.8)
* Fixed HashMatcher issues with SetState() and Find() consistency (1.6.8)
* Fixed error reporting when FST arc type unknown (1.6.8)
* The `first_path` option to ShortestPath is now optimal for A* (1.6.7)
* Renames SymbolTable::kNoSymbol to kNoSymbol (1.6.7)
* Exposes PowerMapper to the scripting API (1.6.7)
* Fixes linking of the special SOs (1.6.7)
* Fixes error handling in HashMatcher (1.6.6)
* Adds kShortestDelta for operations dependent on shortest-distance (1.6.6)
* Adds Python methods for (un)pickling and (de)serializing FSTs (1.6.6)
* Adds constructive variants of Invert and Project (1.6.6)
* Increases code sharing in MemoryPool/MemoryArena (1.6.6)
* Improves consistency of matcher FST ownership (1.6.6)
* Adds non-trivial A* estimator class (1.6.6)
* Prevents unreachable code generation in libfstscript (1.6.5)
* Adds move constructors for non-trivial weight types (1.6.5)
* Standardizes method names for tuple weight types (1.6.5)
* Eliminates undefined behavior in weight hashing (1.6.5)
* Optimizes binary search in SortedMatcher (1.6.5)
* Adds SetWeight (1.6.5)
* Fixes typing error in Python FAR reader (1.6.4)
* Removes restriction that Prune argument have commutative weights (1.6.3)
* Improves configuration of CompositeWeight readers and writers (1.6.3)
* Improves accuracy of ShortestDistance summation (1.6.3)
* SetFinal now "moves" its weight argument (1.6.3)
* Exposes ArcIterator and EncodeMapper flags in Python (1.6.3)
* Properly sets return codes in FST binaries (1.6.3)
* Eliminates StringWeight macros (1.6.3)
* Finalizes most virtual method overrides (1.6.2)
* Fixes missing includes of <fst/log.h> (1.6.1)
* Adds float format support to FST drawing (1.6.1)
* Extensive modernization for C++11 style (1.6.0)
* Many classes and constants moved into an internal namespace (1.6.0)
* Adds HashMatcher (1.6.0)
* Adds Member method to SymbolTable (1.6.0)
* Adds the "special" extension and the fstspecial binary; this is similar to
fstconvert but accepts arguments for specifying special labels (phi, rho,
and sigma) of FSTs (1.6.0)
* Exposes allow_negative_label option for Python symbol tables (1.6.0)
OpenFst: Release 1.5
* Added p-subsequential determinization (1.5.0)
* Generalized epsilon normalization to non-functional case (1.5.0)
* Added general gallic (plus is union) semiring (1.5.0)
* Added FST compression extension (1.5.0)
* Added Python extension (1.5.0)
* Added multiple pushdown transducer (MPDT) support (1.5.0)
* Fixed Isomorphic function (1.5.0)
* Added final method to matchers (1.5.0)
* Fixed various compiler issues (1.5.0)
* Fixed missing Isomorphic components (1.5.0)
* Added UnionWeight (1.5.0)
* Added InputEpsilonMapper and OutputEpsilonMapper arc mappers (1.5.1)
* Added TrivialComposeFilter for more efficient composition when one
of the arguments is epsilon-free (1.5.1)
* Added properties bits kUnweightedCycles and kWeightedCycles (1.5.1)
* Added missing const qualification to (1.5.1):
- SymbolTableIterator access
- EncodeMapper writing to file
- EncodeMapper SymbolTable access
* Replaced internal custom reference-counting (RefCounter) with
C++11 smart pointers where possible, and fixed associated
reference-counting bugs (1.5.1)
* When calling DeleteStates on a MutableFst with a shared impl, the impl
is set to a new empty impl rather than copying and deleting (1.5.1)
* Prepended `Pdt` to the Expand libraries and classes in the PDT
extension, and prepended `MPdt` to the Expand libraries and classes
in the MPDT extension, so that both can be used in the same compilation
unit (1.5.1)
* Added option to PDT Replace for compiling a strongly-regular RTN into a
bounded-stack PDT (1.5.1)
* Improved symbol table support for PDT Replace, including automatic
generation of parentheses symbols (1.5.1)
* Improvements to scripting API (1.5.1):
- Added methods for FST access and mutation
- Added additional checks for arc/weight compatibility
- WeightClass::One and WeightClass::Zero now require a specified weight
type at time of construction
- Improved VectorFstClass constructors
- Added linear-time check for cyclic dependencies in Replace
- Added EncodeMapperClass, a template-free box for an EncodeMapper
* Improvements to the binaries (1.5.1):
- Fixed no-op --precision flag to fstdraw (1.5.1)
- Fixed no-op --file_list_input flag to farcreate (1.5.1)
* Improvements to the Python extension (1.5.1):
- Added methods for creating an empty mutable FST
- Added methods for FST access via state and arc iteration
- Added FST compilation from arclists (cf. fstcompile)
- Added FST printing and drawing
- Added FarReader and FarWriter classes.
* FarReader's GetFst method now returns a pointer (1.5.2)
* Fixed FSTERROR macro (1.5.2)
* Fixed build flags for dlopen (1.5.2)
* Consolidated Python extension into single module (1.5.2)
* Python add_arc now takes an Arc object (1.5.2)
* Adds optional minimization of non-deterministic FSTs (1.5.3)
* Mutation methods of the Python Fst object now support chaining (1.5.3)
* Scripting API and Python weight objects now support semiring arithmetic
(1.5.3)
* Adds RemoveSymbol method to SymbolTable (1.5.4)
* Prevents underflow when using LogProbArcSelector in random generation
(1.5.4)
* Makes random weight generators a single template class (1.5.4)
* Makes weight Properties constexpr where possible (1.5.4)
* Adds check for error when opening files when compiling strings into FARs
(1.5.4)
* Adds routines for parsing string flags to the scripting API (1.5.4)
OpenFst: Release 1.4
* Port to C++11 (1.4.0)
* Disambiguate function added (1.4.0)
* Isomorphic function added (1.4.0)
* Matcher interface augmented with Priority method.
* Special matchers (rho/sigma/phi) can match special symbols
on both input FSTs in composition/intersection provided at each
state pair they only match one side (1.4.0)
* Added ExplicitMatcher to suppress implicit matches (e.g. epsilon
self-loops) (1.4.0)
* Linear{Tagger,Classifier}Fst extensions added (1.4.0).
* Generalized state-reachable to work when input is cyclic (so long as no
final state is in a cycle). This ensures label-reachable (and hence label
lookahead) works with cyclic input (1.4.0)
* Added Condense to build the condensation graph (SCCs condensed to single
states) of an FST (1.4.0).
* Added an option to Reverse to specify whether a super-initial state
should always be created (1.4.0).
* Fixed bugs in FirstCacheStore, PowerWeight, and StringCompiler (1.4.0).
* Changed SymbolTable to use faster data structure (1.4.0).
* Added 'min' disambiguation in determinizaton to keep only the minimum
output in a non-functional transducer when plus=min/max
(flag --disambiguate_output) (1.4.1)
* Compiler issues in linear-fst fixed (1.4.1)
OpenFst: Release 1.3
* Support for non-fatal exits on errors: (1.3.1)
- Added FLAGS_fst_error_fatal: FST errors are
fatal if true (default); o.w. return objects flagged as bad:
e.g., FSTs - kError
prop. true, FST weights - not a Member().
- Added kError property bit signifying bad FST
- Added NoWeight() method to FST weight requirements that returns
weight that is not a Member().
* Various improvements to the FAR extensions (1.3.1)
- a single FST is now a FAR type
- FLAGS_initial_symbols: Uses the symbol table from the
first FST in the archive for all entries"
- Input/output to standard input/output for some FAR and arc types
* --with-icu configuration option no longer needed (1.3.1)
* Improved flags usage esp. if use SET_FLAGS not SetFlags/InitFst (1.3.2)
* Added 'fst' as possible far writer type (1.3.2)
* phi matcher can now accept 0 as the phi label (1.3.2)
* Added ngram-fst extension (1.3.2)
* Improved performance of PDT composition (1.3.3)
* Memory-map support (1.3.3)
* Fixed cross-FST serialization issues (1.3.3)
* Fixed NGramFst off-by-one issue (1.3.3)
* farextract now allows one to specify a list of comma-separated keys,
including key ranges (1.3.3)
* Fixed bug in PDT replace that could cause close paren IDs to collide
with open paren IDs (1.3.4)
OpenFst: Release 1.2
* Added lookahead matching and filtering for faster composition
* Added EditFst for mutation of o.w. immutable FSTs
* Added script sub-namespace defining type FstClass, a non-templated
Fst<Arc> to hold the arc template type internally. This and FST
operations on it allow easier I/O and scripting at the cost of some
runtime dispatching.
* Added per-arc-iterator control of Fst caching.
* Added PowerWeight and Power Arc.
* Added SparsePowerWeight and SparsePowerArc (1.2.4)
* Added SignedLogWeight and SignedLogArc (1.2.4)
* Added ExpectationWeight and ExpectationArc (1.2.4)
* Added AStarQueue, PruneQueue and NaturalPruneQueue disciplines (1.2.6)
* Added Log64Weight and Log64Arc to FST library throughout, including
support throughout scripts/bins/dsos (1.2.8)
* Added delayed RandGenFst that outputs tree of paths weighted
by count (1.2.8)
* Added fstsymbols shell-level command
* Added total weight removal option to pushing
* Changed methods for symbol table mutation:
use MutableInputSymbols()/MutableOutputSymbols().
* Numerous efficiency improvements esp in composition, replace, and caching
* Made "fstmap" handle semiring conversion by adding "to_std", "to_log"
and "to_log64" as supported 'map_type' arguments (1.2.8).
* Made the destructive implementation of RmEpsilon skip over states
admitting no non-epsilon incoming transition (1.2.8).
* Fixed numerous bugs (1.2 through 1.2.9) including:
- improper types of some approximation deltas
- sub-optimal hashing functions
- issues in internal reuse of shortest distance
- hashing bug in FloatWeight
- bug in shortest path queue
- symbol table checksumming issues
- various C++ standards issues
- Visit() behavior when visitation aborted
- Decode() hash performance bug (1.2.1)
- EditFst::Copy(bool) method when the boolean parameter is true (1.2.7)
- SymbolTable memory leak in Invert() (1.2.8)
- Added escaping of " and \ in labels in fstdraw, needed for dot to
function properly (1.2.8)
- Fixed handling of final weight of start state in fstpush (1.2.8)
- Added FST_LL_FORMAT to fix 64-bit integer printf issues (1.2.9)
- Fixed missing <functional> includes (1.2.9)
- Fixed reused local variable names (1.2.9)
- Fixed passing string by reference in FstDraw args (1.2.9)
* Added extensions directories including:
- finite-state archive (FAR) utilities,
added stlist format supporting writing/reading to/from standard out/in
at the library-level (1.2.8)
- compact fsts
- lookahead fsts
- pushdown transducers (improved in 1.2.1 through 1.2.7).
* Added StateMap/StateMapFst; renamed Map/MapFst to ArcMap/ArcMapFst;
map/MapFst retained (but deprecated) (1.2.9)
* Deleted ArcSum() and ArcMerge; use StateMap w/ ArcSumMapper and
ArcUniqueMapper (1.2.9).
* Incremented version of ConstFst/CompactFsts to stop memory alignment
  that fails on pipes. Made old versions raise errors when read on
pipes (1.2.9).
* Improved determinize hash (1.2.9)
* Removed stdio uses (1.2.10)
* Fixed library ordering issues esp. with newer GNU build tools (1.2.10)
OpenFst: Release 1.1
* Added compat.h to src/include/fst to fix missing defines
* Fixed bug in acyclic minimization that led to non-minimal
(but equivalent) results
* Fixed missing FST typedef in various matchers in matcher.h
so that they can be cascaded
* Opened file streams binary where appropriate
OpenFst: Release 1.0 (Additions to beta version):
* Matcher class added for matching labels at FST states. Includes
special matchers for sigma (any), rho ('rest'), and phi ('fail')
labels.
* Composition generalized with arbitrary filters, matchers, and state
tables.
* Sequence and matching composition filters provided. (see compose.h,
compose-filter.h, matcher.h, state-table.h)
* Unique n-best (see shortest-path.h)
* Pruning in determinization and epsilon removal (see determinize.h,
rmepsilon.h)
* New Fst class:
* Compact Fsts for space-efficient representation (see compact-fst.h)
* New Weight classes:
* MinMax
* Lexicographic
* Miscellaneous bug fixes
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/lib/util.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// FST utility definitions.
#include <fst/util.h>
#include <cctype>
#include <sstream>
#include <string>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/mapped-file.h>
// Utility flag definitions
DEFINE_bool(fst_error_fatal, true,
"FST errors are fatal; o.w. return objects flagged as bad: "
"e.g., FSTs: kError property set, FST weights: not a Member()");
namespace fst {
// Splits `full` in place at every character contained in `delim`: each
// delimiter found is overwritten with '\0' and a pointer to the start of the
// piece is appended to `vec`. When `omit_empty_strings` is true, zero-length
// pieces (adjacent delimiters, leading/trailing delimiter) are dropped.
// Note: `vec` holds pointers into `full`, so `full` must outlive `vec`.
void SplitString(char *full, const char *delim, std::vector<char *> *vec,
                 bool omit_empty_strings) {
  char *piece = full;
  while (piece) {
    char *cut = strpbrk(piece, delim);
    if (cut) *cut = '\0';
    if (!omit_empty_strings || *piece != '\0') vec->push_back(piece);
    piece = cut ? cut + 1 : nullptr;
  }
}
int64 StrToInt64(const string &s, const string &src, size_t nline,
bool allow_negative, bool *error) {
int64 n;
const char *cs = s.c_str();
char *p;
if (error) *error = false;
n = strtoll(cs, &p, 10);
if (p < cs + s.size() || (!allow_negative && n < 0)) {
FSTERROR() << "StrToInt64: Bad integer = " << s << "\", source = " << src
<< ", line = " << nline;
if (error) *error = true;
return 0;
}
return n;
}
// Rewrites *s into a legal C identifier body by replacing every
// non-alphanumeric character with '_'. (The result may still begin with a
// digit; callers needing a strictly legal identifier must handle that.)
void ConvertToLegalCSymbol(string *s) {
  for (auto it = s->begin(); it != s->end(); ++it) {
    // Cast to unsigned char: passing a negative char value (possible for
    // bytes >= 0x80 when char is signed) to isalnum() is undefined behavior.
    if (!isalnum(static_cast<unsigned char>(*it))) {
      *it = '_';
    }
  }
}
// Skips over input characters until the stream position is a multiple of
// MappedFile::kArchAlignment. Returns false if the position cannot be
// determined (tellg failure).
bool AlignInput(std::istream &strm) {
  char c;
  // At most kArchAlignment - 1 bytes need to be consumed to reach alignment.
  for (int i = 0; i < MappedFile::kArchAlignment; ++i) {
    int64 pos = strm.tellg();
    if (pos < 0) {
      LOG(ERROR) << "AlignInput: Can't determine stream position";
      return false;
    }
    if (pos % MappedFile::kArchAlignment == 0) break;
    strm.read(&c, 1);
  }
  return true;
}
// Writes null padding bytes until the stream position is a multiple of
// MappedFile::kArchAlignment. Returns false if the position cannot be
// determined (tellp failure).
bool AlignOutput(std::ostream &strm) {
  // At most kArchAlignment - 1 padding bytes are needed to reach alignment.
  for (int i = 0; i < MappedFile::kArchAlignment; ++i) {
    int64 pos = strm.tellp();
    if (pos < 0) {
      LOG(ERROR) << "AlignOutput: Can't determine stream position";
      return false;
    }
    if (pos % MappedFile::kArchAlignment == 0) break;
    strm.write("", 1);
  }
  return true;
}
} // namespace fst
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-electronjs_v5.0_8k-linux-amd64-opt.yml | build:
template_file: test-linux-opt-base.tyml
docker_image: "ubuntu:16.04"
dependencies:
- "linux-amd64-cpu-opt"
- "test-training_8k-linux-amd64-py36m-opt"
test_model_task: "test-training_8k-linux-amd64-py36m-opt"
system_setup:
>
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} ${electronjs.packages_xenial.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 5.0.6 8k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU ElectronJS v5.0 tests (8kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on ElectronJS v5.0, CPU only, optimized version (8kHz)"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/intersect.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to compute the intersection of two FSAs.
#ifndef FST_INTERSECT_H_
#define FST_INTERSECT_H_
#include <algorithm>
#include <vector>
#include <fst/log.h>
#include <fst/cache.h>
#include <fst/compose.h>
namespace fst {
// Intersection is implemented on top of composition, so it shares
// composition's option struct.
using IntersectOptions = ComposeOptions;
// Options for the delayed IntersectFst; mirrors ComposeFstOptions since
// IntersectFst delegates all work to ComposeFst.
template <class Arc, class M = Matcher<Fst<Arc>>,
          class Filter = SequenceComposeFilter<M>,
          class StateTable =
              GenericComposeStateTable<Arc, typename Filter::FilterState>>
struct IntersectFstOptions
    : public ComposeFstOptions<Arc, M, Filter, StateTable> {
  IntersectFstOptions() {}
  // Matchers, filter, and state table may be supplied explicitly; nullptr
  // means "construct a default". NOTE(review): ownership presumably follows
  // the ComposeFstOptions conventions — confirm against compose.h.
  explicit IntersectFstOptions(const CacheOptions &opts, M *matcher1 = nullptr,
                               M *matcher2 = nullptr, Filter *filter = nullptr,
                               StateTable *state_table = nullptr)
      : ComposeFstOptions<Arc, M, Filter, StateTable>(opts, matcher1, matcher2,
                                                      filter, state_table) {}
};
// Computes the intersection (Hadamard product) of two FSAs. This version is a
// delayed FST. Only strings that are in both automata are retained in the
// result.
//
// The two arguments must be acceptors. One of the arguments must be
// label-sorted.
//
// Complexity: same as ComposeFst.
//
// Caveats: same as ComposeFst.
template <class A>
class IntersectFst : public ComposeFst<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using ComposeFst<A>::CreateBase;
  using ComposeFst<A>::CreateBase1;
  using ComposeFst<A>::Properties;
  // Intersects fst1 and fst2 with default composition options. Both inputs
  // must be acceptors; otherwise the result is flagged with kError.
  IntersectFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
               const CacheOptions &opts = CacheOptions())
      : ComposeFst<Arc>(CreateBase(fst1, fst2, opts)) {
    const bool acceptors =
        fst1.Properties(kAcceptor, true) && fst2.Properties(kAcceptor, true);
    if (!acceptors) {
      FSTERROR() << "IntersectFst: Input FSTs are not acceptors";
      GetMutableImpl()->SetProperties(kError);
    }
  }
  // As above, but with caller-specified matcher/filter/state-table options.
  template <class M, class Filter, class StateTable>
  IntersectFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
               const IntersectFstOptions<Arc, M, Filter, StateTable> &opts)
      : ComposeFst<Arc>(CreateBase1(fst1, fst2, opts)) {
    const bool acceptors =
        fst1.Properties(kAcceptor, true) && fst2.Properties(kAcceptor, true);
    if (!acceptors) {
      FSTERROR() << "IntersectFst: input FSTs are not acceptors";
      GetMutableImpl()->SetProperties(kError);
    }
  }
  // See Fst<>::Copy() for doc.
  IntersectFst(const IntersectFst<Arc> &fst, bool safe = false)
      : ComposeFst<Arc>(fst, safe) {}
  // Get a copy of this IntersectFst. See Fst<>::Copy() for further doc.
  IntersectFst<Arc> *Copy(bool safe = false) const override {
    return new IntersectFst<Arc>(*this, safe);
  }
 private:
  using ImplToFst<internal::ComposeFstImplBase<A>>::GetImpl;
  using ImplToFst<internal::ComposeFstImplBase<A>>::GetMutableImpl;
};
// Specialization for IntersectFst: state iteration is delegated to the
// underlying ComposeFst.
template <class Arc>
class StateIterator<IntersectFst<Arc>> : public StateIterator<ComposeFst<Arc>> {
 public:
  explicit StateIterator(const IntersectFst<Arc> &fst)
      : StateIterator<ComposeFst<Arc>>(fst) {}
};
// Specialization for IntersectFst: arc iteration is delegated to the
// underlying ComposeFst.
template <class Arc>
class ArcIterator<IntersectFst<Arc>> : public ArcIterator<ComposeFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;
  ArcIterator(const IntersectFst<Arc> &fst, StateId s)
      : ArcIterator<ComposeFst<Arc>>(fst, s) {}
};
// Useful alias when using StdArc.
using StdIntersectFst = IntersectFst<StdArc>;
// Computes the intersection (Hadamard product) of two FSAs. This version
// writes the intersection to an output MutableFst. Only strings that are in
// both automata are retained in the result.
//
// The two arguments must be acceptors. One of the arguments must be
// label-sorted.
//
// Complexity: same as Compose.
//
// Caveats: same as Compose.
template <class Arc>
void Intersect(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2,
               MutableFst<Arc> *ofst,
               const IntersectOptions &opts = IntersectOptions()) {
  using M = Matcher<Fst<Arc>>;
  if (opts.filter_type == AUTO_FILTER) {
    CacheOptions nopts;
    nopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, nopts);
  } else if (opts.filter_type == SEQUENCE_FILTER) {
    IntersectFstOptions<Arc> iopts;
    iopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts);
  } else if (opts.filter_type == ALT_SEQUENCE_FILTER) {
    IntersectFstOptions<Arc, M, AltSequenceComposeFilter<M>> iopts;
    iopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts);
  } else if (opts.filter_type == MATCH_FILTER) {
    IntersectFstOptions<Arc, M, MatchComposeFilter<M>> iopts;
    iopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts);
  }
  // NOTE(review): filter types other than the four handled above leave *ofst
  // untouched; confirm whether that silent fall-through is intended.
  if (opts.connect) Connect(ofst);
}
} // namespace fst
#endif // FST_INTERSECT_H_
| 0 |
coqui_public_repos/TTS/TTS/tts/layers | coqui_public_repos/TTS/TTS/tts/layers/tortoise/transformer.py | import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
# helpers
def exists(val):
    """Report whether ``val`` holds an actual value rather than ``None``."""
    return not (val is None)
def default(val, d):
    """Return ``val`` unless it is ``None``, in which case return the fallback ``d``."""
    if val is None:
        return d
    return val
def cast_tuple(val, depth=1):
    """Coerce ``val`` to a tuple: tuples pass through, lists are converted,
    and any other value is repeated ``depth`` times."""
    if isinstance(val, tuple):
        return val
    if isinstance(val, list):
        return tuple(val)
    return (val,) * depth
def max_neg_value(t):
    """Most negative finite value representable in ``t``'s dtype (mask fill value)."""
    dtype_info = torch.finfo(t.dtype)
    return -dtype_info.max
def stable_softmax(t, dim=-1, alpha=32**2):
    """Numerically stabilized softmax: rescale by ``alpha``, subtract the
    (detached) per-``dim`` maximum, then undo the scaling before softmax.
    Mathematically identical to ``t.softmax(dim)``."""
    scaled = t / alpha
    shifted = scaled - torch.amax(scaled, dim=dim, keepdim=True).detach()
    return (shifted * alpha).softmax(dim=dim)
def route_args(router, args, depth):
    """Split ``args`` into ``depth`` per-layer ``(f_kwargs, g_kwargs)`` pairs.

    For each key present in both ``args`` and ``router``, ``router[key]`` is a
    sequence of ``depth`` (to_f, to_g) boolean pairs saying which sublayer of
    each layer receives that argument.
    """
    routed = [({}, {}) for _ in range(depth)]
    for key, val in args.items():
        if key not in router:
            continue
        for layer_idx, (to_f, to_g) in enumerate(router[key]):
            f_kwargs, g_kwargs = routed[layer_idx]
            if to_f:
                f_kwargs[key] = val
            if to_g:
                g_kwargs[key] = val
    return routed
# classes
class SequentialSequence(nn.Module):
    """Runs a stack of (f, g) sublayer pairs with residual connections,
    routing keyword arguments to each sublayer according to ``args_route``.

    ``layer_dropout`` is stored but not applied in ``forward``.
    """

    def __init__(self, layers, args_route={}, layer_dropout=0.0):
        super().__init__()
        depths_match = all(len(route) == len(layers) for route in args_route.values())
        assert (
            depths_match
        ), "each argument route map must have the same depth as the number of sequential layers"
        self.layers = layers
        self.args_route = args_route
        self.layer_dropout = layer_dropout

    def forward(self, x, **kwargs):
        routed = route_args(self.args_route, kwargs, len(self.layers))
        for (f, g), (f_kwargs, g_kwargs) in zip(self.layers, routed):
            x = x + f(x, **f_kwargs)
            x = x + g(x, **g_kwargs)
        return x
class DivideMax(nn.Module):
    """Normalizes a tensor by its (detached) maximum along ``dim``."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        peak = x.amax(dim=self.dim, keepdim=True).detach()
        return x / peak
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
    """Scales the wrapped module's output by a learned per-channel factor,
    initialized smaller for deeper networks (CaiT-style LayerScale)."""

    def __init__(self, dim, depth, fn):
        super().__init__()
        # Depth-dependent initial scale, per the paper.
        if depth <= 18:
            init_eps = 0.1
        elif depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        self.scale = nn.Parameter(torch.full((1, 1, dim), init_eps))
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        return out * self.scale
# layer norm
class PreNorm(nn.Module):
    """LayerNorm applied before ``fn`` (and optionally after: "sandwich" norm)."""

    def __init__(self, dim, fn, sandwich=False):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        # Second norm only in sandwich mode; Identity otherwise.
        self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        out = self.fn(normed, **kwargs)
        return self.norm_out(out)
# feed forward
class GEGLU(nn.Module):
    """Gated GELU activation.

    Splits the last dimension in half and multiplies one half by the GELU of
    the other, halving the feature dimension.
    """

    def forward(self, x):
        value, gate = x.chunk(2, dim=-1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> GEGLU -> Dropout -> Linear.

    Args:
        dim: model dimension.
        dropout: dropout probability applied after the gated activation.
        mult: hidden-layer width multiplier (hidden size is ``dim * mult``).
    """

    def __init__(self, dim, dropout=0.0, mult=4.0):
        super().__init__()
        # Fix: ``mult`` defaults to a float (4.0), so ``dim * mult * 2`` was a
        # float; nn.Linear requires integer feature sizes, so cast explicitly.
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),  # doubled for the GEGLU split
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim),
        )

    def forward(self, x):
        return self.net(x)
# Attention
class Attention(nn.Module):
    """Standard multi-head (optionally causal) self-attention.

    Projects the input to q/k/v with a single bias-free linear layer, computes
    scaled dot-product attention per head, and projects back to ``dim``.
    """

    def __init__(self, dim, seq_len, causal=True, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.seq_len = seq_len
        # 1/sqrt(d_head) scaling, applied to q before the dot product.
        self.scale = dim_head**-0.5
        self.causal = causal
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))

    def forward(self, x, mask=None):
        # x: (batch, seq, dim). mask (if given) is broadcast as (b, 1, 1, j)
        # and keys where ~mask are suppressed, so True appears to mean "keep"
        # -- NOTE(review): confirm mask convention against callers.
        b, n, _, h, device = *x.shape, self.heads, x.device
        softmax = torch.softmax
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), qkv)
        q = q * self.scale
        dots = torch.einsum("b h i d, b h j d -> b h i j", q, k)
        # Large negative fill so masked positions get ~0 weight after softmax.
        mask_value = max_neg_value(dots)
        if exists(mask):
            mask = rearrange(mask, "b j -> b () () j")
            dots.masked_fill_(~mask, mask_value)
            del mask
        if self.causal:
            # Upper-triangular (strictly above the diagonal, offset by j - i)
            # mask blocks attention to future positions.
            i, j = dots.shape[-2:]
            mask = torch.ones(i, j, device=device).triu_(j - i + 1).bool()
            dots.masked_fill_(mask, mask_value)
        attn = softmax(dots, dim=-1)
        out = torch.einsum("b h i j, b h j d -> b h i d", attn, v)
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.to_out(out)
        return out
# main transformer class
class Transformer(nn.Module):
    """Stack of ``depth`` pre-norm attention + feed-forward blocks.

    Each block is wrapped in LayerScale (depth-dependent init) and PreNorm
    (optionally sandwich norm), then run residually by SequentialSequence,
    which routes the ``mask`` kwarg to every attention layer.
    """

    def __init__(
        self,
        *,
        dim,
        depth,
        seq_len,
        causal=True,
        heads=8,
        dim_head=64,
        ff_mult=4,
        attn_dropout=0.0,
        ff_dropout=0.0,
        sparse_attn=False,
        sandwich_norm=False,
    ):
        super().__init__()
        layers = nn.ModuleList([])
        # Per-layer sparse-attention flags.
        # NOTE(review): ``sparse_attn`` is expanded per layer but the loop
        # below always builds a dense Attention regardless of the flag --
        # confirm the sparse path was removed intentionally.
        sparse_layer = cast_tuple(sparse_attn, depth)
        for ind, sparse_attn in zip(range(depth), sparse_layer):
            attn = Attention(
                dim,
                causal=causal,
                seq_len=seq_len,
                heads=heads,
                dim_head=dim_head,
                dropout=attn_dropout,
            )
            ff = FeedForward(dim, mult=ff_mult, dropout=ff_dropout)
            layers.append(
                nn.ModuleList(
                    [
                        LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich=sandwich_norm)),
                        LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich=sandwich_norm)),
                    ]
                )
            )
        execute_type = SequentialSequence
        # Route the ``mask`` kwarg to the attention (first) member of each pair.
        route_attn = ((True, False),) * depth
        attn_route_map = {"mask": route_attn}
        self.layers = execute_type(layers, args_route=attn_route_map)

    def forward(self, x, **kwargs):
        return self.layers(x, **kwargs)
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/sparse-tuple-weight.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Sparse version of tuple-weight, based on tuple-weight.h.
// Internally stores sparse key, value pairs in linked list. The default value
// element is the assumed value of unset keys. Internal singleton
// implementation that stores first key, value pair as a initialized member
// variable to avoid unnecessary allocation on heap. Use
// SparseTupleWeightIterator to iterate through the key,value pairs. Note:
// this does NOT iterate through the default value.
//
// Sparse tuple weight set operation definitions.
#ifndef FST_SPARSE_TUPLE_WEIGHT_H_
#define FST_SPARSE_TUPLE_WEIGHT_H_
#include <algorithm>
#include <list>
#include <stack>
#include <string>
#include <unordered_map>
#include <utility>
#include <fst/weight.h>
namespace fst {
template <class W, class K>
class SparseTupleWeightIterator;
// Arbitrary dimension tuple weight, stored as a sorted linked-list.
// W is any weight class, and K is the key value type. kNoKey (-1) is reserved
// for internal use.
template <class W, class K = int>
class SparseTupleWeight {
public:
using ReverseWeight = SparseTupleWeight<typename W::ReverseWeight, K>;
using Iterator = SparseTupleWeightIterator<W, K>;
using Pair = std::pair<K, W>;
using Weight = W;
using Index = K;
constexpr static K kNoKey = -1;
SparseTupleWeight() { Init(); }
template <class Iterator>
SparseTupleWeight(Iterator begin, Iterator end) {
Init();
// Assumes input iterator is sorted.
for (auto it = begin; it != end; ++it) PushBack(*it);
}
// Initialize component `key` to `weight`, with `default_weight` for all
// other components.
SparseTupleWeight(const K &key, const W &weight, const W &default_weight)
: default_(default_weight),
first_(weight == default_weight ? kNoKey : key, weight) {}
explicit SparseTupleWeight(const W &weight) { Init(weight); }
SparseTupleWeight(const SparseTupleWeight &weight) {
Init(weight.DefaultValue());
SetDefaultValue(weight.DefaultValue());
for (Iterator it(weight); !it.Done(); it.Next()) {
PushBack(it.Value());
}
}
SparseTupleWeight(SparseTupleWeight &&weight)
// Don't move the default, so weight.default_ is still valid.
: default_(weight.default_), first_(std::move(weight.first_)),
rest_(std::move(weight.rest_)) {
// move leaves the source in a valid but unspecified state.
// Make sure the source weight is empty.
weight.first_ = Pair(kNoKey, W::NoWeight());
weight.rest_.clear();
}
static const SparseTupleWeight &Zero() {
static const SparseTupleWeight zero(W::Zero());
return zero;
}
static const SparseTupleWeight &One() {
static const SparseTupleWeight one(W::One());
return one;
}
static const SparseTupleWeight &NoWeight() {
static const SparseTupleWeight no_weight(W::NoWeight());
return no_weight;
}
std::istream &Read(std::istream &strm) {
ReadType(strm, &default_);
ReadType(strm, &first_);
return ReadType(strm, &rest_);
}
std::ostream &Write(std::ostream &strm) const {
WriteType(strm, default_);
WriteType(strm, first_);
return WriteType(strm, rest_);
}
SparseTupleWeight &operator=(const SparseTupleWeight &weight) {
if (this == &weight) return *this; // Checks for identity.
Init(weight.DefaultValue());
for (Iterator it(weight); !it.Done(); it.Next()) {
PushBack(it.Value());
}
return *this;
}
SparseTupleWeight &operator=(SparseTupleWeight &&weight) {
if (this == &weight) return *this; // Checks for identity.
default_ = weight.default_;
std::swap(first_, weight.first_);
std::swap(rest_, weight.rest_);
return *this;
}
bool Member() const {
if (!DefaultValue().Member()) return false;
for (Iterator it(*this); !it.Done(); it.Next()) {
if (!it.Value().second.Member()) return false;
}
return true;
}
// Assumes H() function exists for the hash of the key value.
size_t Hash() const {
size_t h = 0;
static const std::hash<K> H;
for (Iterator it(*this); !it.Done(); it.Next()) {
h = 5 * h + H(it.Value().first);
h = 13 * h + it.Value().second.Hash();
}
return h;
}
SparseTupleWeight Quantize(float delta = kDelta) const {
SparseTupleWeight weight;
for (Iterator it(*this); !it.Done(); it.Next()) {
weight.PushBack(it.Value().first, it.Value().second.Quantize(delta));
}
return weight;
}
ReverseWeight Reverse() const {
SparseTupleWeight weight;
for (Iterator it(*this); !it.Done(); it.Next()) {
weight.PushBack(it.Value().first, it.Value().second.Reverse());
}
return ReverseWeight(weight);
}
void Init(const W &default_value = W::Zero()) {
first_ = Pair(kNoKey, W::NoWeight());
// Initialized to the reserved key value.
default_ = default_value;
rest_.clear();
}
size_t Size() const {
if (first_.first == kNoKey) {
return 0;
} else {
return rest_.size() + 1;
}
}
inline void PushBack(const K &key, const W &weight,
bool default_value_check = true) {
PushBack(std::make_pair(key, weight), default_value_check);
}
inline void PushBack(const Pair &pair, bool default_value_check = true) {
if (default_value_check && pair.second == default_) return;
if (first_.first == kNoKey) {
first_ = pair;
} else {
rest_.push_back(pair);
}
}
// Returns the `key`-th component, or the default value if not set.
const W &Value(const K &key) const {
// TODO(rybach): Consider binary search.
Iterator iter(*this);
for (; !iter.Done() && iter.Value().first < key; iter.Next()) continue;
return !iter.Done() && iter.Value().first == key ? iter.Value().second
: DefaultValue();
}
void SetValue(const K &key, const W &w) {
if (w == DefaultValue()) {
ClearValue(key);
} else {
SetValueToNonDefault(key, w);
}
}
void SetDefaultValue(const W &value) { default_ = value; }
const W &DefaultValue() const { return default_; }
private:
void SetValueToNonDefault(const K &key, const W &w) {
// Don't use SparseTupleWeightIterator, since that's const.
if (first_.first == kNoKey) {
first_ = Pair(key, w);
} else if (key < first_.first) {
rest_.push_front(first_);
first_ = Pair(key, w);
} else if (key == first_.first) {
first_.second = w;
} else {
const auto i =
std::find_if(rest_.begin(), rest_.end(),
[key](const Pair &p) { return p.first >= key; });
if (i != rest_.end() && i->first == key) {
i->second = w;
} else {
rest_.insert(i, Pair(key, w));
}
}
}
// Removes the weight value for `key`, having the effect of setting
// it to `DefaultValue()`.
void ClearValue(const K &key) {
if (key == first_.first) {
if (!rest_.empty()) {
first_ = rest_.front();
rest_.pop_front();
} else {
first_.first = kNoKey;
}
} else if (key > first_.first) {
const auto i =
std::find_if(rest_.begin(), rest_.end(),
[key](const Pair &p) { return p.first >= key; });
if (i != rest_.end() && i->first == key) {
rest_.erase(i);
}
}
}
// Assumed default value of uninitialized keys, by default W::Zero().
W default_;
// Key values pairs are first stored in first_, then fill rest_ this way we
// can avoid dynamic allocation in the common case where the weight is a
// single key/value pair.
Pair first_;
std::list<Pair> rest_;
friend class SparseTupleWeightIterator<W, K>;
};
// Out-of-class definition for kNoKey: required (pre-C++17) because the
// constant is odr-used (passed by reference).
template <class W, class K>
constexpr K SparseTupleWeight<W, K>::kNoKey;
// Forward iterator over the explicitly stored (non-default) key/value pairs
// of a SparseTupleWeight; visits first_ then rest_, never the default value.
template <class W, class K>
class SparseTupleWeightIterator {
 public:
  using Pair = typename SparseTupleWeight<W, K>::Pair;
  using const_iterator = typename std::list<Pair>::const_iterator;
  using iterator = typename std::list<Pair>::iterator;
  explicit SparseTupleWeightIterator(const SparseTupleWeight<W, K> &weight)
      : first_(weight.first_),
        rest_(weight.rest_),
        init_(true),
        iter_(rest_.begin()) {}
  // Done when positioned at an empty first_ slot, or past the end of rest_.
  bool Done() const {
    if (init_) {
      return first_.first == SparseTupleWeight<W, K>::kNoKey;
    } else {
      return iter_ == rest_.end();
    }
  }
  const Pair &Value() const { return init_ ? first_ : *iter_; }
  void Next() {
    if (init_) {
      init_ = false;
    } else {
      ++iter_;
    }
  }
  void Reset() {
    init_ = true;
    iter_ = rest_.begin();
  }
 private:
  // References into the iterated weight: it must outlive this iterator.
  const Pair &first_;
  const std::list<Pair> &rest_;
  bool init_;  // In the initialized state (pointing at first_)?
  const_iterator iter_;
};
// Merges two sparse tuple weights component-wise into *result.
// M must be callable as a function W(K, W, W).
// K will be kNoKey when mapping the default value.
// Both inputs are walked in sorted-key order; when a key is present in only
// one input, the other side's default value is supplied to the mapper.
template <class W, class K, class M>
inline void SparseTupleWeightMap(SparseTupleWeight<W, K> *result,
                                 const SparseTupleWeight<W, K> &w1,
                                 const SparseTupleWeight<W, K> &w2,
                                 const M &operator_mapper) {
  SparseTupleWeightIterator<W, K> w1_it(w1);
  SparseTupleWeightIterator<W, K> w2_it(w2);
  const auto &v1_def = w1.DefaultValue();
  const auto &v2_def = w2.DefaultValue();
  result->SetDefaultValue(
      operator_mapper(SparseTupleWeight<W, K>::kNoKey, v1_def, v2_def));
  while (!w1_it.Done() || !w2_it.Done()) {
    // When one side is exhausted, borrow the other's key so the comparisons
    // below fall through to the single-sided branches.
    const auto &k1 = (w1_it.Done()) ? w2_it.Value().first : w1_it.Value().first;
    const auto &k2 = (w2_it.Done()) ? w1_it.Value().first : w2_it.Value().first;
    const auto &v1 = (w1_it.Done()) ? v1_def : w1_it.Value().second;
    const auto &v2 = (w2_it.Done()) ? v2_def : w2_it.Value().second;
    if (k1 == k2) {
      result->PushBack(k1, operator_mapper(k1, v1, v2));
      if (!w1_it.Done()) w1_it.Next();
      if (!w2_it.Done()) w2_it.Next();
    } else if (k1 < k2) {
      result->PushBack(k1, operator_mapper(k1, v1, v2_def));
      w1_it.Next();
    } else {
      result->PushBack(k2, operator_mapper(k2, v1_def, v2));
      w2_it.Next();
    }
  }
}
// Equality: defaults must match, and a sorted merge-walk over both key sets
// must find equal values (comparing against the default where a key is
// present on only one side).
template <class W, class K>
inline bool operator==(const SparseTupleWeight<W, K> &w1,
                       const SparseTupleWeight<W, K> &w2) {
  const auto &v1_def = w1.DefaultValue();
  const auto &v2_def = w2.DefaultValue();
  if (v1_def != v2_def) return false;
  SparseTupleWeightIterator<W, K> w1_it(w1);
  SparseTupleWeightIterator<W, K> w2_it(w2);
  while (!w1_it.Done() || !w2_it.Done()) {
    const auto &k1 = (w1_it.Done()) ? w2_it.Value().first : w1_it.Value().first;
    const auto &k2 = (w2_it.Done()) ? w1_it.Value().first : w2_it.Value().first;
    const auto &v1 = (w1_it.Done()) ? v1_def : w1_it.Value().second;
    const auto &v2 = (w2_it.Done()) ? v2_def : w2_it.Value().second;
    if (k1 == k2) {
      if (v1 != v2) return false;
      if (!w1_it.Done()) w1_it.Next();
      if (!w2_it.Done()) w2_it.Next();
    } else if (k1 < k2) {
      // Key only in w1: equal iff its value matches w2's default.
      if (v1 != v2_def) return false;
      w1_it.Next();
    } else {
      // Key only in w2: equal iff its value matches w1's default.
      if (v1_def != v2) return false;
      w2_it.Next();
    }
  }
  return true;
}
// Inequality, defined as the negation of the merge-based equality above.
template <class W, class K>
inline bool operator!=(const SparseTupleWeight<W, K> &w1,
                       const SparseTupleWeight<W, K> &w2) {
  return !(w1 == w2);
}
// Text serialization: writes the default value followed by alternating
// key/value elements via CompositeWeightWriter.
template <class W, class K>
inline std::ostream &operator<<(std::ostream &strm,
                                const SparseTupleWeight<W, K> &weight) {
  CompositeWeightWriter writer(strm);
  writer.WriteBegin();
  writer.WriteElement(weight.DefaultValue());
  for (SparseTupleWeightIterator<W, K> it(weight); !it.Done(); it.Next()) {
    writer.WriteElement(it.Value().first);
    writer.WriteElement(it.Value().second);
  }
  writer.WriteEnd();
  return strm;
}
// Text deserialization, inverse of operator<<: reads the default value, then
// key/value pairs until the reader reports no more elements.
template <class W, class K>
inline std::istream &operator>>(std::istream &strm,
                                SparseTupleWeight<W, K> &weight) {
  CompositeWeightReader reader(strm);
  reader.ReadBegin();
  W def;
  bool more = reader.ReadElement(&def);
  weight.Init(def);
  while (more) {
    K key;
    reader.ReadElement(&key);
    W v;
    // ReadElement returns whether further elements follow this one.
    more = reader.ReadElement(&v);
    weight.PushBack(key, v);
  }
  reader.ReadEnd();
  return strm;
}
} // namespace fst
#endif // FST_SPARSE_TUPLE_WEIGHT_H_
| 0 |
coqui_public_repos/STT/native_client | coqui_public_repos/STT/native_client/wasm/bindings.cc | #include <emscripten/bind.h>
#include <iostream>
#include <string>
#include <vector>
#include "coqui-stt.h"
using namespace emscripten;
typedef struct TokenMetadataStub {
std::string text;
unsigned int timestep;
float start_time;
static TokenMetadataStub fromTokenMetadata(TokenMetadata tokenMetadata) {
return TokenMetadataStub{
tokenMetadata.text,
tokenMetadata.timestep,
tokenMetadata.start_time
};
}
};
typedef struct CandidateTranscriptStub {
std::vector<TokenMetadataStub> tokens;
double confidence;
static CandidateTranscriptStub fromCandidateTranscript(CandidateTranscript candidateTranscript) {
std::vector<TokenMetadataStub> tokens = std::vector<TokenMetadataStub>(candidateTranscript.num_tokens);
for (int i = 0; i < candidateTranscript.num_tokens; i++) {
const TokenMetadata candidateToken = candidateTranscript.tokens[i];
TokenMetadataStub token = TokenMetadataStub::fromTokenMetadata(candidateToken);
tokens[i] = token;
}
return CandidateTranscriptStub{
tokens,
candidateTranscript.confidence
};
}
};
typedef struct MetadataStub {
std::vector<CandidateTranscriptStub> transcripts;
static MetadataStub fromMetadata(Metadata* metadata) {
std::vector<CandidateTranscriptStub> transcripts = std::vector<CandidateTranscriptStub>(metadata->num_transcripts);
for (int i = 0; i < metadata->num_transcripts; i++) {
const CandidateTranscript candidateTranscript = metadata->transcripts[i];
CandidateTranscriptStub transcript = CandidateTranscriptStub::fromCandidateTranscript(candidateTranscript);
transcripts[i] = transcript;
}
return MetadataStub{
transcripts
};
}
} MetadataStub;
// Thin RAII-less wrapper over the C streaming API. Wraps a StreamingState*
// created by Model::createStream; any finish* call consumes (frees) the
// underlying stream, after which this object must not be used again.
// NOTE(review): passing audio by value copies the buffer on every call and
// streamingState is not freed if finish* is never called -- confirm intended.
class Stream {
 public:
  Stream(StreamingState* streamingState)
      : streamingState(streamingState) {}

  // Pushes 16-bit PCM samples into the stream.
  void feedAudioContent(std::vector<short> audioBuffer) {
    STT_FeedAudioContent(this->streamingState, audioBuffer.data(), audioBuffer.size());
  }

  // Decodes what has been fed so far without finalizing the stream.
  std::string intermediateDecode() {
    char* tempResult = STT_IntermediateDecode(this->streamingState);
    if (!tempResult) {
      // There was some error, return an empty string.
      return std::string();
    }

    // We must manually free the string if something was returned to us.
    std::string result = tempResult;
    STT_FreeString(tempResult);
    return result;
  }

  // As intermediateDecode, but returns the N-best list with per-token info.
  MetadataStub intermediateDecodeWithMetadata(unsigned int numResults = 1) {
    Metadata* tempResult =
        STT_IntermediateDecodeWithMetadata(this->streamingState, numResults);
    if (!tempResult) {
      // There was some error, return an empty string.
      return MetadataStub{};
    }

    MetadataStub metadata = MetadataStub::fromMetadata(tempResult);
    STT_FreeMetadata(tempResult);
    return metadata;
  }

  // Intermediate decode that also flushes buffered audio through the model.
  std::string intermediateDecodeFlushBuffers() {
    char* tempResult =
        STT_IntermediateDecodeFlushBuffers(this->streamingState);
    if (!tempResult) {
      // There was some error, return an empty string.
      return std::string();
    }

    // We must manually free the string if something was returned to us.
    std::string result = tempResult;
    STT_FreeString(tempResult);
    return result;
  }

  MetadataStub intermediateDecodeWithMetadataFlushBuffers(unsigned int numResults = 1) {
    Metadata* tempResult =
        STT_IntermediateDecodeWithMetadataFlushBuffers(this->streamingState, numResults);
    if (!tempResult) {
      // There was some error, return an empty string.
      return MetadataStub{};
    }

    MetadataStub metadata = MetadataStub::fromMetadata(tempResult);
    STT_FreeMetadata(tempResult);
    return metadata;
  }

  // Finalizes decoding and frees the underlying stream.
  std::string finishStream() {
    char* tempResult = STT_FinishStream(this->streamingState);

    // Regardless of the result, the stream will be deleted.
    this->streamingState = nullptr;

    if (!tempResult) {
      // There was some error, return an empty string.
      return std::string();
    }

    // We must manually free the string if something was returned to us.
    std::string result = tempResult;
    STT_FreeString(tempResult);
    return result;
  }

  // Finalizes decoding with metadata and frees the underlying stream.
  MetadataStub finishStreamWithMetadata(unsigned int numResults = 1) {
    Metadata* tempResult =
        STT_FinishStreamWithMetadata(this->streamingState, numResults);

    // Regardless of the result, the stream will be deleted.
    this->streamingState = nullptr;

    if (!tempResult) {
      // There was some error, return an empty string.
      return MetadataStub{};
    }

    MetadataStub metadata = MetadataStub::fromMetadata(tempResult);
    STT_FreeMetadata(tempResult);
    return metadata;
  }

 private:
  // Owned by the C library; consumed by STT_FinishStream*.
  StreamingState* streamingState;
};
// Wrapper over the C model API. Loads a model from an in-memory buffer at
// construction and frees it in the destructor. Error codes from the C API
// are logged to stderr rather than thrown (Embind-friendly).
class Model {
 public:
  Model(std::string buffer) : state(nullptr), buffer(buffer) {
    loadModelFromBuffer();
  }

  ~Model() { STT_FreeModel(state); }

  int getSampleRate() const { return STT_GetModelSampleRate(this->state); }

  int getModelBeamWidth() const { return STT_GetModelBeamWidth(this->state); }

  void setModelBeamWidth(unsigned int width) const {
    int status = STT_SetModelBeamWidth(this->state, width);
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not set model beam width: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  // NOTE(review): the destructor also frees `state`; calling this and then
  // destroying the Model double-frees -- confirm callers never do both.
  void freeModel() const { return STT_FreeModel(this->state); }

  // Enables an external (KenLM) scorer loaded from an in-memory buffer.
  void enableExternalScorer(std::string scorerBuffer) const {
    int status = STT_EnableExternalScorerFromBuffer(this->state, scorerBuffer.c_str(),
                                                    scorerBuffer.size());
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not enable external scorer: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  void disableExternalScorer() const {
    int status = STT_DisableExternalScorer(this->state);
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not set model beam width: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  // Sets the scorer's language-model weight (alpha) and word-insertion
  // weight (beta).
  void setScorerAlphaBeta(float alpha, float beta) const {
    int status = STT_SetScorerAlphaBeta(this->state, alpha, beta);
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not set scorer alpha beta: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  // Biases decoding toward `word` by `boost`.
  void addHotWord(const std::string& word, float boost) {
    int status = STT_AddHotWord(this->state, word.c_str(), boost);
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not add hot word: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  void eraseHotWord(const std::string& word) {
    int status = STT_EraseHotWord(this->state, word.c_str());
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not erase hot word: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  void clearHotWords() {
    int status = STT_ClearHotWords(this->state);
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "Could not clear hot words: " << error << std::endl;
      STT_FreeString(error);
    }
  }

  // One-shot recognition of a whole 16-bit PCM buffer.
  std::string speechToText(std::vector<short> audioBuffer) const {
    char* tempResult =
        STT_SpeechToText(this->state, audioBuffer.data(), audioBuffer.size());
    if (!tempResult) {
      // There was some error, return an empty string.
      return std::string();
    }

    // We must manually free the string if something was returned to us.
    std::string result = tempResult;
    STT_FreeString(tempResult);
    return result;
  }

  // One-shot recognition returning the aNumResults-best transcripts.
  // NOTE(review): unlike speechToText, a null result here is not checked
  // before fromMetadata dereferences it -- confirm the C API cannot
  // return null on this path.
  MetadataStub speechToTextWithMetadata(std::vector<short> audioBuffer,
                                        unsigned int aNumResults) const {
    Metadata* tempResult = STT_SpeechToTextWithMetadata(
        this->state, audioBuffer.data(), audioBuffer.size(), aNumResults);
    MetadataStub metadata = MetadataStub::fromMetadata(tempResult);
    STT_FreeMetadata(tempResult);
    return metadata;
  }

  // Creates a streaming recognizer; caller owns the returned Stream.
  // Returns nullptr on failure.
  Stream* createStream() {
    StreamingState* streamingState;
    int status = STT_CreateStream(this->state, &streamingState);
    if (status != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(status);
      std::cerr << "createStream failed: " << error << std::endl;
      STT_FreeString(error);
      return nullptr;
    }
    return new Stream(streamingState);
  }

 private:
  ModelState* state;   // owned; freed in ~Model()
  std::string buffer;  // raw model file bytes kept alive for the C API

  void loadModelFromBuffer() {
    int ret = STT_CreateModelFromBuffer(this->buffer.c_str(),
                                        this->buffer.size(), &this->state);
    if (ret != STT_ERR_OK) {
      char* error = STT_ErrorCodeToErrorMessage(ret);
      std::cerr << "Could not create model: " << error << std::endl;
      STT_FreeString(error);
      return;
    }
  }
};
// Binding code
EMSCRIPTEN_BINDINGS(coqui_ai_apis) {
  // JS-visible Model class: construction from a model buffer plus all
  // configuration and one-shot recognition entry points.
  class_<Model>("Model")
      .constructor<std::string>()
      .function("getSampleRate", &Model::getSampleRate)
      .function("getModelBeamWidth", &Model::getModelBeamWidth)
      .function("setModelBeamWidth", &Model::setModelBeamWidth)
      .function("freeModel", &Model::freeModel)
      .function("addHotWord", &Model::addHotWord)
      .function("eraseHotWord", &Model::eraseHotWord)
      .function("clearHotWords", &Model::clearHotWords)
      .function("speechToText", &Model::speechToText)
      .function("speechToTextWithMetadata", &Model::speechToTextWithMetadata)
      // allow_raw_pointers: createStream returns an owning Stream* to JS.
      .function("createStream", &Model::createStream, allow_raw_pointers())
      .function("enableExternalScorer", &Model::enableExternalScorer)
      .function("disableExternalScorer", &Model::disableExternalScorer)
      .function("setScorerAlphaBeta", &Model::setScorerAlphaBeta);

  // JS-visible streaming recognizer.
  class_<Stream>("Stream")
      .constructor<StreamingState*>()
      .function("feedAudioContent", &Stream::feedAudioContent)
      .function("intermediateDecode", &Stream::intermediateDecode)
      .function("intermediateDecodeWithMetadata", &Stream::intermediateDecodeWithMetadata)
      .function("intermediateDecodeFlushBuffers", &Stream::intermediateDecodeFlushBuffers)
      .function("intermediateDecodeWithMetadataFlushBuffers",
                &Stream::intermediateDecodeWithMetadataFlushBuffers)
      .function("finishStream", &Stream::finishStream)
      .function("finishStreamWithMetadata", &Stream::finishStreamWithMetadata);

  // Value objects are copied field-by-field into plain JS objects.
  value_object<TokenMetadataStub>("TokenMetadataStub")
      .field("text", &TokenMetadataStub::text)
      .field("timestep", &TokenMetadataStub::timestep)
      .field("start_time", &TokenMetadataStub::start_time);

  value_object<CandidateTranscriptStub>("CandidateTranscriptStub")
      .field("tokens", &CandidateTranscriptStub::tokens)
      .field("confidence", &CandidateTranscriptStub::confidence);

  value_object<MetadataStub>("Metadata")
      .field("transcripts", &MetadataStub::transcripts);

  // std::vector bindings used by the value objects and audio buffers above.
  register_vector<short>("VectorShort");
  register_vector<CandidateTranscriptStub>("CandidateTranscriptStubVector");
  register_vector<TokenMetadataStub>("TokenMetadataStubVector");
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/const-fst.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Simple concrete immutable FST whose states and arcs are each stored in
// single arrays.
#ifndef FST_CONST_FST_H_
#define FST_CONST_FST_H_
#include <climits>
#include <string>
#include <vector>
// Google-only...
// ...Google-only
#include <fst/log.h>
#include <fst/expanded-fst.h>
#include <fst/fst-decl.h>
#include <fst/mapped-file.h>
#include <fst/test-properties.h>
#include <fst/util.h>
namespace fst {
template <class A, class Unsigned>
class ConstFst;
template <class F, class G>
void Cast(const F &, G *);
namespace internal {
// States and arcs each implemented by single arrays, templated on the
// Arc definition. Unsigned is used to represent indices into the arc array.
// States and arcs each implemented by single arrays, templated on the
// Arc definition. Unsigned is used to represent indices into the arc array.
// Immutable: built once (from another FST or from a file) and only read after.
template <class A, class Unsigned>
class ConstFstImpl : public FstImpl<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  using FstImpl<A>::SetInputSymbols;
  using FstImpl<A>::SetOutputSymbols;
  using FstImpl<A>::SetType;
  using FstImpl<A>::SetProperties;
  using FstImpl<A>::Properties;

  ConstFstImpl()
      : states_(nullptr),
        arcs_(nullptr),
        nstates_(0),
        narcs_(0),
        start_(kNoStateId) {
    // Type name encodes the index width, e.g. "const64", except for the
    // default 32-bit case which is plain "const".
    string type = "const";
    if (sizeof(Unsigned) != sizeof(uint32_t)) {
      type += std::to_string(CHAR_BIT * sizeof(Unsigned));
    }
    SetType(type);
    SetProperties(kNullProperties | kStaticProperties);
  }

  explicit ConstFstImpl(const Fst<Arc> &fst);

  StateId Start() const { return start_; }

  Weight Final(StateId s) const { return states_[s].weight; }

  StateId NumStates() const { return nstates_; }

  size_t NumArcs(StateId s) const { return states_[s].narcs; }

  size_t NumInputEpsilons(StateId s) const { return states_[s].niepsilons; }

  size_t NumOutputEpsilons(StateId s) const { return states_[s].noepsilons; }

  static ConstFstImpl<Arc, Unsigned> *Read(std::istream &strm,
                                           const FstReadOptions &opts);

  // Pointer into the shared arc array for state s (arcs are contiguous).
  const Arc *Arcs(StateId s) const { return arcs_ + states_[s].pos; }

  // Provide information needed for generic state iterator.
  void InitStateIterator(StateIteratorData<Arc> *data) const {
    data->base = nullptr;
    data->nstates = nstates_;
  }

  // Provide information needed for the generic arc iterator.
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const {
    data->base = nullptr;
    data->arcs = arcs_ + states_[s].pos;
    data->narcs = states_[s].narcs;
    data->ref_count = nullptr;
  }

 private:
  // Used to find narcs_ and nstates_ in Write.
  friend class ConstFst<Arc, Unsigned>;

  // States implemented by array *states_ below, arcs by (single) *arcs_.
  struct ConstState {
    Weight weight;        // Final weight.
    Unsigned pos;         // Start of state's arcs in *arcs_.
    Unsigned narcs;       // Number of arcs (per state).
    Unsigned niepsilons;  // Number of input epsilons.
    Unsigned noepsilons;  // Number of output epsilons.

    ConstState() : weight(Weight::Zero()) {}
  };

  // Properties always true of this FST class.
  static constexpr uint64_t kStaticProperties = kExpanded;
  // Current unaligned file format version. The unaligned version was added and
  // made the default since the aligned version does not work on pipes.
  static constexpr int kFileVersion = 2;
  // Current aligned file format version.
  static constexpr int kAlignedFileVersion = 1;
  // Minimum file format version supported.
  static constexpr int kMinFileVersion = 1;

  // Memory regions are kept alive for the lifetime of the impl; states_ and
  // arcs_ point into them.
  std::unique_ptr<MappedFile> states_region_;  // Mapped file for states.
  std::unique_ptr<MappedFile> arcs_region_;    // Mapped file for arcs.
  ConstState *states_;                         // States representation.
  Arc *arcs_;                                  // Arcs representation.
  StateId nstates_;                            // Number of states.
  size_t narcs_;                               // Number of arcs.
  StateId start_;                              // Initial state.

  ConstFstImpl(const ConstFstImpl &) = delete;
  ConstFstImpl &operator=(const ConstFstImpl &) = delete;
};
// Out-of-class definitions for the static constexpr members above, required
// (pre-C++17) wherever those constants are odr-used.
template <class Arc, class Unsigned>
constexpr uint64_t ConstFstImpl<Arc, Unsigned>::kStaticProperties;
template <class Arc, class Unsigned>
constexpr int ConstFstImpl<Arc, Unsigned>::kFileVersion;
template <class Arc, class Unsigned>
constexpr int ConstFstImpl<Arc, Unsigned>::kAlignedFileVersion;
template <class Arc, class Unsigned>
constexpr int ConstFstImpl<Arc, Unsigned>::kMinFileVersion;
// Builds a ConstFstImpl by copying an arbitrary FST: one pass to count
// states/arcs and size the flat arrays, a second pass to fill them in.
template <class Arc, class Unsigned>
ConstFstImpl<Arc, Unsigned>::ConstFstImpl(const Fst<Arc> &fst)
    : nstates_(0), narcs_(0) {
  string type = "const";
  if (sizeof(Unsigned) != sizeof(uint32_t)) {
    type += std::to_string(CHAR_BIT * sizeof(Unsigned));
  }
  SetType(type);
  SetInputSymbols(fst.InputSymbols());
  SetOutputSymbols(fst.OutputSymbols());
  start_ = fst.Start();
  // Counts states and arcs.
  for (StateIterator<Fst<Arc>> siter(fst); !siter.Done(); siter.Next()) {
    ++nstates_;
    for (ArcIterator<Fst<Arc>> aiter(fst, siter.Value()); !aiter.Done();
         aiter.Next()) {
      ++narcs_;
    }
  }
  states_region_.reset(MappedFile::Allocate(nstates_ * sizeof(*states_)));
  arcs_region_.reset(MappedFile::Allocate(narcs_ * sizeof(*arcs_)));
  states_ = reinterpret_cast<ConstState *>(states_region_->mutable_data());
  arcs_ = reinterpret_cast<Arc *>(arcs_region_->mutable_data());
  // Second pass: copy per-state finals and arcs, tracking epsilon counts.
  size_t pos = 0;
  for (StateId s = 0; s < nstates_; ++s) {
    states_[s].weight = fst.Final(s);
    states_[s].pos = pos;
    states_[s].narcs = 0;
    states_[s].niepsilons = 0;
    states_[s].noepsilons = 0;
    for (ArcIterator<Fst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) {
      const auto &arc = aiter.Value();
      ++states_[s].narcs;
      if (arc.ilabel == 0) ++states_[s].niepsilons;
      if (arc.olabel == 0) ++states_[s].noepsilons;
      arcs_[pos] = arc;
      ++pos;
    }
  }
  // Mutable FSTs have trusted properties; otherwise verify (excluding the
  // cycle-weight bits, which are expensive to check).
  const auto props =
      fst.Properties(kMutable, false)
          ? fst.Properties(kCopyProperties, true)
          : CheckProperties(
                fst, kCopyProperties & ~kWeightedCycles & ~kUnweightedCycles,
                kCopyProperties);
  SetProperties(props | kStaticProperties);
}
// Reads (or memory-maps, per opts.mode) a ConstFstImpl from a stream:
// header, then the state array, then the arc array. Returns nullptr and logs
// on any failure.
template <class Arc, class Unsigned>
ConstFstImpl<Arc, Unsigned> *ConstFstImpl<Arc, Unsigned>::Read(
    std::istream &strm, const FstReadOptions &opts) {
  using ConstState = typename ConstFstImpl<Arc, Unsigned>::ConstState;
  std::unique_ptr<ConstFstImpl<Arc, Unsigned>> impl(
      new ConstFstImpl<Arc, Unsigned>());
  FstHeader hdr;
  if (!impl->ReadHeader(strm, opts, kMinFileVersion, &hdr)) return nullptr;
  impl->start_ = hdr.Start();
  impl->nstates_ = hdr.NumStates();
  impl->narcs_ = hdr.NumArcs();
  // Ensures compatibility: version-1 files were always aligned.
  if (hdr.Version() == kAlignedFileVersion) {
    hdr.SetFlags(hdr.GetFlags() | FstHeader::IS_ALIGNED);
  }
  if ((hdr.GetFlags() & FstHeader::IS_ALIGNED) && !AlignInput(strm)) {
    LOG(ERROR) << "ConstFst::Read: Alignment failed: " << opts.source;
    return nullptr;
  }
  size_t b = impl->nstates_ * sizeof(ConstState);
  impl->states_region_.reset(
      MappedFile::Map(&strm, opts.mode == FstReadOptions::MAP, opts.source, b));
  if (!strm || !impl->states_region_) {
    LOG(ERROR) << "ConstFst::Read: Read failed: " << opts.source;
    return nullptr;
  }
  impl->states_ =
      reinterpret_cast<ConstState *>(impl->states_region_->mutable_data());
  if ((hdr.GetFlags() & FstHeader::IS_ALIGNED) && !AlignInput(strm)) {
    LOG(ERROR) << "ConstFst::Read: Alignment failed: " << opts.source;
    return nullptr;
  }
  b = impl->narcs_ * sizeof(Arc);
  impl->arcs_region_.reset(
      MappedFile::Map(&strm, opts.mode == FstReadOptions::MAP, opts.source, b));
  if (!strm || !impl->arcs_region_) {
    LOG(ERROR) << "ConstFst::Read: Read failed: " << opts.source;
    return nullptr;
  }
  impl->arcs_ = reinterpret_cast<Arc *>(impl->arcs_region_->mutable_data());
  return impl.release();
}
} // namespace internal
// Simple concrete immutable FST. This class attaches interface to
// implementation and handles reference counting, delegating most methods to
// ImplToExpandedFst. The unsigned type U is used to represent indices into the
// arc array (default declared in fst-decl.h).
template <class A, class Unsigned>
class ConstFst : public ImplToExpandedFst<internal::ConstFstImpl<A, Unsigned>> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Impl = internal::ConstFstImpl<A, Unsigned>;
  using ConstState = typename Impl::ConstState;

  friend class StateIterator<ConstFst<Arc, Unsigned>>;
  friend class ArcIterator<ConstFst<Arc, Unsigned>>;

  template <class F, class G>
  void friend Cast(const F &, G *);

  // Constructs an empty ConstFst.
  ConstFst() : ImplToExpandedFst<Impl>(std::make_shared<Impl>()) {}

  // Constructs a ConstFst by copying (and flattening) an arbitrary FST.
  explicit ConstFst(const Fst<Arc> &fst)
      : ImplToExpandedFst<Impl>(std::make_shared<Impl>(fst)) {}

  // Copy constructor; `safe` is accepted for interface symmetry with other
  // FST classes but is unused here.
  ConstFst(const ConstFst<A, Unsigned> &fst, bool safe = false)
      : ImplToExpandedFst<Impl>(fst) {}

  // Gets a copy of this ConstFst. See Fst<>::Copy() for further doc.
  ConstFst<A, Unsigned> *Copy(bool safe = false) const override {
    return new ConstFst<A, Unsigned>(*this, safe);
  }

  // Reads a ConstFst from an input stream, returning nullptr on error.
  static ConstFst<A, Unsigned> *Read(std::istream &strm,
                                     const FstReadOptions &opts) {
    auto *impl = Impl::Read(strm, opts);
    return impl ? new ConstFst<A, Unsigned>(std::shared_ptr<Impl>(impl))
                : nullptr;
  }

  // Read a ConstFst from a file; return nullptr on error; empty filename reads
  // from standard input.
  static ConstFst<A, Unsigned> *Read(const string &filename) {
    auto *impl = ImplToExpandedFst<Impl>::Read(filename);
    return impl ? new ConstFst<A, Unsigned>(std::shared_ptr<Impl>(impl))
                : nullptr;
  }

  bool Write(std::ostream &strm, const FstWriteOptions &opts) const override {
    return WriteFst(*this, strm, opts);
  }

  bool Write(const string &filename) const override {
    return Fst<Arc>::WriteFile(filename);
  }

  // Writes any FST in ConstFst binary format (definition follows the class).
  template <class FST>
  static bool WriteFst(const FST &fst, std::ostream &strm,
                       const FstWriteOptions &opts);

  void InitStateIterator(StateIteratorData<Arc> *data) const override {
    GetImpl()->InitStateIterator(data);
  }

  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
    GetImpl()->InitArcIterator(s, data);
  }

 private:
  explicit ConstFst(std::shared_ptr<Impl> impl)
      : ImplToExpandedFst<Impl>(impl) {}

  using ImplToFst<Impl, ExpandedFst<Arc>>::GetImpl;

  // Uses overloading to extract the type of the argument: this overload is
  // selected when the argument actually is a ConstFst.
  static const Impl *GetImplIfConstFst(const ConstFst &const_fst) {
    return const_fst.GetImpl();
  }

  // NB: this does not give privileged treatment to subtypes of ConstFst.
  template <typename FST>
  static Impl *GetImplIfConstFst(const FST &fst) {
    return nullptr;
  }

  ConstFst &operator=(const ConstFst &) = delete;
};
// Writes FST in Const format, potentially with a pass over the machine before
// writing to compute number of states and arcs.
// Writes an FST in ConstFst binary format. If the input already is a
// ConstFst, the state/arc counts come straight from its impl. Otherwise,
// when the stream is unseekable (or stream_write is requested), a first pass
// over the machine computes the counts so the header can be written up
// front; when the stream is seekable, the header is patched in place after
// the data has been written.
template <class Arc, class Unsigned>
template <class FST>
bool ConstFst<Arc, Unsigned>::WriteFst(const FST &fst, std::ostream &strm,
                                       const FstWriteOptions &opts) {
  const auto file_version =
      opts.align ? internal::ConstFstImpl<Arc, Unsigned>::kAlignedFileVersion
                 : internal::ConstFstImpl<Arc, Unsigned>::kFileVersion;
  size_t num_arcs = 0;    // To silence -Wsometimes-uninitialized warnings.
  size_t num_states = 0;  // Ditto.
  size_t start_offset = 0;
  bool update_header = true;
  if (const auto *impl = GetImplIfConstFst(fst)) {
    // Counts are already known exactly; no header rewrite needed.
    num_arcs = impl->narcs_;
    num_states = impl->nstates_;
    update_header = false;
  } else if (opts.stream_write || (start_offset = strm.tellp()) == -1) {
    // Precompute values needed for the header when we cannot seek back to
    // rewrite it (tellp() returning -1 signals an unseekable stream).
    num_arcs = 0;
    num_states = 0;
    for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
      num_arcs += fst.NumArcs(siter.Value());
      ++num_states;
    }
    update_header = false;
  }
  FstHeader hdr;
  hdr.SetStart(fst.Start());
  hdr.SetNumStates(num_states);
  hdr.SetNumArcs(num_arcs);
  // The type name encodes the index width when it is not 32 bits
  // (e.g. "const64").
  string type = "const";
  if (sizeof(Unsigned) != sizeof(uint32_t)) {
    type += std::to_string(CHAR_BIT * sizeof(Unsigned));
  }
  const auto properties =
      fst.Properties(kCopyProperties, true) |
      internal::ConstFstImpl<Arc, Unsigned>::kStaticProperties;
  internal::FstImpl<Arc>::WriteFstHeader(fst, strm, opts, file_version, type,
                                         properties, &hdr);
  if (opts.align && !AlignOutput(strm)) {
    LOG(ERROR) << "Could not align file during write after header";
    return false;
  }
  size_t pos = 0;
  size_t states = 0;
  typename ConstFst<Arc, Unsigned>::ConstState state;
  // First pass: write one ConstState record per state; state.pos records the
  // state's offset into the arc array written in the second pass.
  for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
    const auto s = siter.Value();
    state.weight = fst.Final(s);
    state.pos = pos;
    state.narcs = fst.NumArcs(s);
    state.niepsilons = fst.NumInputEpsilons(s);
    state.noepsilons = fst.NumOutputEpsilons(s);
    strm.write(reinterpret_cast<const char *>(&state), sizeof(state));
    pos += state.narcs;
    ++states;
  }
  hdr.SetNumStates(states);
  hdr.SetNumArcs(pos);
  if (opts.align && !AlignOutput(strm)) {
    // Bug fix: this branch previously only logged and then wrote misaligned
    // arc data; fail instead, consistent with the post-header check above.
    LOG(ERROR) << "Could not align file during write after writing states";
    return false;
  }
  // Second pass: write the flat arc array.
  for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
    for (ArcIterator<FST> aiter(fst, siter.Value()); !aiter.Done();
         aiter.Next()) {
      const auto &arc = aiter.Value();
      // Google-only...
#ifdef MEMORY_SANITIZER
      // arc may contain padding which has unspecified contents. Tell MSAN to
      // not complain about it when writing it to a file.
      ANNOTATE_MEMORY_IS_INITIALIZED(reinterpret_cast<const char *>(&arc),
                                     sizeof(arc));
#endif
      // ...Google-only
      strm.write(reinterpret_cast<const char *>(&arc), sizeof(arc));
    }
  }
  strm.flush();
  if (!strm) {
    LOG(ERROR) << "ConstFst::WriteFst: write failed: " << opts.source;
    return false;
  }
  if (update_header) {
    // Seek back and patch the header with the observed counts.
    return internal::FstImpl<Arc>::UpdateFstHeader(
        fst, strm, opts, file_version, type, properties, &hdr, start_offset);
  } else {
    // Counts were fixed up front; verify the machine did not change while
    // being written.
    if (hdr.NumStates() != num_states) {
      LOG(ERROR) << "Inconsistent number of states observed during write";
      return false;
    }
    if (hdr.NumArcs() != num_arcs) {
      LOG(ERROR) << "Inconsistent number of arcs observed during write";
      return false;
    }
  }
  return true;
}
// Specialization for ConstFst; see generic version in fst.h for sample usage
// (but use the ConstFst type instead). This version should inline.
template <class Arc, class Unsigned>
class StateIterator<ConstFst<Arc, Unsigned>> {
 public:
  using StateId = typename Arc::StateId;

  // Caches the state count once; iteration is then a plain counter sweep.
  explicit StateIterator(const ConstFst<Arc, Unsigned> &fst)
      : nstates_(fst.GetImpl()->NumStates()), s_(0) {}

  bool Done() const { return s_ >= nstates_; }

  StateId Value() const { return s_; }

  void Next() { ++s_; }

  void Reset() { s_ = 0; }

 private:
  const StateId nstates_;  // Total number of states (fixed for a ConstFst).
  StateId s_;              // Current state ID.
};
// Specialization for ConstFst; see generic version in fst.h for sample usage
// (but use the ConstFst type instead). This version should inline.
template <class Arc, class Unsigned>
class ArcIterator<ConstFst<Arc, Unsigned>> {
 public:
  using StateId = typename Arc::StateId;

  // Points directly at the impl's contiguous arc array for state s.
  ArcIterator(const ConstFst<Arc, Unsigned> &fst, StateId s)
      : arcs_(fst.GetImpl()->Arcs(s)),
        narcs_(fst.GetImpl()->NumArcs(s)),
        i_(0) {}

  bool Done() const { return i_ >= narcs_; }

  const Arc &Value() const { return arcs_[i_]; }

  void Next() { ++i_; }

  size_t Position() const { return i_; }

  void Reset() { i_ = 0; }

  void Seek(size_t a) { i_ = a; }

  // Flags are constant for ConstFst arc iterators.
  constexpr uint32_t Flags() const { return kArcValueFlags; }

  void SetFlags(uint32_t, uint32_t) {}  // No-op: nothing to configure.

 private:
  const Arc *arcs_;  // Borrowed pointer into the impl's arc array.
  size_t narcs_;     // Number of arcs of the iterated state.
  size_t i_;         // Current position within arcs_.
};
// A useful alias when using StdArc.
using StdConstFst = ConstFst<StdArc>;
} // namespace fst
#endif // FST_CONST_FST_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstdifference.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_string(compose_filter, "auto",
"Composition filter, one of: \"alt_sequence\", \"auto\", "
"\"match\", \"null\", \"sequence\", \"trivial\"");
DEFINE_bool(connect, true, "Trim output");
int fstdifference_main(int argc, char **argv);
// Thin entry point: all of the work happens in fstdifference_main().
int main(int argc, char **argv) {
  return fstdifference_main(argc, argv);
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstintersect.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_string(compose_filter, "auto",
"Composition filter, one of: \"alt_sequence\", \"auto\", "
"\"match\", \"null\", \"sequence\", \"trivial\"");
DEFINE_bool(connect, true, "Trim output");
int fstintersect_main(int argc, char **argv);
// Thin entry point: all of the work happens in fstintersect_main().
int main(int argc, char **argv) {
  return fstintersect_main(argc, argv);
}
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-electronjs_v8.0_16k-linux-amd64-opt.yml | build:
template_file: test-linux-opt-base.tyml
docker_image: "ubuntu:16.04"
dependencies:
- "linux-amd64-cpu-opt"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
system_setup:
>
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} ${electronjs.packages_xenial.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 8.0.1 16k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU ElectronJS v8.0 tests (16kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on ElectronJS v8.0, CPU only, optimized version (16kHz)"
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/mpdt/mpdtcompose.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Composes an MPDT and an FST.
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/extensions/mpdt/mpdtscript.h>
#include <fst/extensions/mpdt/read_write_utils.h>
#include <fst/extensions/pdt/getters.h>
#include <fst/util.h>
DEFINE_string(mpdt_parentheses, "",
"MPDT parenthesis label pairs with assignments");
DEFINE_bool(left_mpdt, true, "Is the first argument the MPDT?");
DEFINE_bool(connect, true, "Trim output?");
DEFINE_string(compose_filter, "paren",
"Composition filter, one of: \"expand\", \"expand_paren\", "
"\"paren\"");
// CLI driver: composes an MPDT with an FST (either argument order), writing
// the result to out.mpdt or standard output.
int main(int argc, char **argv) {
  namespace s = fst::script;
  using fst::MPdtComposeOptions;
  using fst::PdtComposeFilter;
  using fst::ReadLabelTriples;
  using fst::script::FstClass;
  using fst::script::VectorFstClass;

  string usage = "Compose an MPDT and an FST.\n\n Usage: ";
  usage += argv[0];
  usage += " in.pdt in.fst [out.mpdt]\n";
  usage += " in.fst in.pdt [out.mpdt]\n";

  std::set_new_handler(FailedNewHandler);
  SET_FLAGS(usage.c_str(), &argc, &argv, true);
  if (argc < 3 || argc > 4) {
    ShowUsage();
    return 1;
  }

  // "-" denotes standard input; at most one input may come from stdin.
  const string in1_name = strcmp(argv[1], "-") == 0 ? "" : argv[1];
  const string in2_name = strcmp(argv[2], "-") == 0 ? "" : argv[2];
  const string out_name = argc > 3 ? argv[3] : "";

  if (in1_name.empty() && in2_name.empty()) {
    LOG(ERROR) << argv[0] << ": Can't take both inputs from standard input.";
    return 1;
  }

  std::unique_ptr<FstClass> ifst1(FstClass::Read(in1_name));
  if (!ifst1) return 1;
  std::unique_ptr<FstClass> ifst2(FstClass::Read(in2_name));
  if (!ifst2) return 1;

  // The MPDT parenthesis specification (label pairs with stack assignments)
  // is mandatory.
  if (FLAGS_mpdt_parentheses.empty()) {
    LOG(ERROR) << argv[0] << ": No MPDT parenthesis label pairs provided";
    return 1;
  }

  std::vector<s::LabelPair> parens;
  std::vector<int64> assignments;
  if (!ReadLabelTriples(FLAGS_mpdt_parentheses, &parens, &assignments, false))
    return 1;

  VectorFstClass ofst(ifst1->ArcType());

  PdtComposeFilter compose_filter;
  if (!s::GetPdtComposeFilter(FLAGS_compose_filter, &compose_filter)) {
    LOG(ERROR) << argv[0] << ": Unknown or unsupported compose filter type: "
               << FLAGS_compose_filter;
    return 1;
  }

  const MPdtComposeOptions opts(FLAGS_connect, compose_filter);

  // FLAGS_left_mpdt selects whether the first or second input is the MPDT.
  s::MPdtCompose(*ifst1, *ifst2, parens, assignments, &ofst, opts,
                 FLAGS_left_mpdt);

  ofst.Write(out_name);

  return 0;
}
| 0 |
coqui_public_repos/TTS/TTS/tts | coqui_public_repos/TTS/TTS/tts/utils/visual.py | import librosa
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.colors import LogNorm
matplotlib.use("Agg")
def plot_alignment(alignment, info=None, fig_size=(16, 10), title=None, output_fig=False, plot_log=False):
    """Render an attention alignment matrix as an image and return the figure.

    Accepts either a torch tensor or a numpy array; float16 inputs are
    promoted to float32 before plotting. ``info`` is appended to the x-axis
    label, and ``plot_log`` switches the color scale to logarithmic.
    """
    data = alignment.detach().cpu().numpy().squeeze() if isinstance(alignment, torch.Tensor) else alignment
    if data.dtype == np.float16:
        data = data.astype(np.float32)
    fig, ax = plt.subplots(figsize=fig_size)
    color_norm = LogNorm() if plot_log else None
    image = ax.imshow(data.T, aspect="auto", origin="lower", interpolation="none", norm=color_norm)
    fig.colorbar(image, ax=ax)
    xlabel = "Decoder timestep"
    if info is not None:
        xlabel = xlabel + "\n\n" + info
    plt.xlabel(xlabel)
    plt.ylabel("Encoder timestep")
    # plt.yticks(range(len(text)), list(text))
    plt.tight_layout()
    if title is not None:
        plt.title(title)
    if not output_fig:
        plt.close()
    return fig
def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):
    """Plot a (transposed) spectrogram and return the figure.

    Accepts a torch tensor or numpy array; float16 is promoted to float32.
    When an audio processor ``ap`` is given, the spectrogram is denormalized
    with it before plotting.
    """
    if isinstance(spectrogram, torch.Tensor):
        spec = spectrogram.detach().cpu().numpy().squeeze().T
    else:
        spec = spectrogram.T
    if spec.dtype == np.float16:
        spec = spec.astype(np.float32)
    if ap is not None:
        spec = ap.denormalize(spec)  # pylint: disable=protected-access
    fig = plt.figure(figsize=fig_size)
    plt.imshow(spec, aspect="auto", origin="lower")
    plt.colorbar()
    plt.tight_layout()
    if not output_fig:
        plt.close()
    return fig
def plot_pitch(pitch, spectrogram, ap=None, fig_size=(30, 10), output_fig=False):
    """Plot pitch curves on top of the spectrogram.

    Args:
        pitch (np.array): Pitch values.
        spectrogram (np.array): Spectrogram values.

    Shapes:
        pitch: :math:`(T,)`
        spec: :math:`(C, T)`
    """
    spec = spectrogram.detach().cpu().numpy().squeeze().T if isinstance(spectrogram, torch.Tensor) else spectrogram.T
    if spec.dtype == np.float16:
        spec = spec.astype(np.float32)
    if ap is not None:
        spec = ap.denormalize(spec)  # pylint: disable=protected-access

    # Temporarily override the global figure size, restoring it afterwards.
    saved_fig_size = plt.rcParams["figure.figsize"]
    if fig_size is not None:
        plt.rcParams["figure.figsize"] = fig_size

    fig, spec_axis = plt.subplots()
    spec_axis.imshow(spec, aspect="auto", origin="lower")
    spec_axis.set_xlabel("time")
    spec_axis.set_ylabel("spec_freq")

    # Draw the pitch contour on a twin y-axis over the spectrogram.
    pitch_axis = spec_axis.twinx()
    pitch_axis.plot(pitch, linewidth=5.0, color="red")
    pitch_axis.set_ylabel("F0")

    plt.rcParams["figure.figsize"] = saved_fig_size
    if not output_fig:
        plt.close()
    return fig
def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):
    """Plot pitch curves on top of the input characters.

    Args:
        pitch (np.array): Pitch values.
        chars (str): Characters to place to the x-axis.

    Shapes:
        pitch: :math:`(T,)`
    """
    # Temporarily override the global figure size, restoring it afterwards.
    saved_fig_size = plt.rcParams["figure.figsize"]
    if fig_size is not None:
        plt.rcParams["figure.figsize"] = fig_size

    fig, char_axis = plt.subplots()
    tick_positions = np.array(range(len(chars)))
    plt.xticks(tick_positions, chars)
    char_axis.set_xlabel("characters")
    char_axis.set_ylabel("freq")

    # Pitch contour on a twin y-axis over the character axis.
    pitch_axis = char_axis.twinx()
    pitch_axis.plot(pitch, linewidth=5.0, color="red")
    pitch_axis.set_ylabel("F0")

    plt.rcParams["figure.figsize"] = saved_fig_size
    if not output_fig:
        plt.close()
    return fig
def plot_avg_energy(energy, chars, fig_size=(30, 10), output_fig=False):
    """Plot energy curves on top of the input characters.

    Args:
        energy (np.array): energy values.
        chars (str): Characters to place to the x-axis.

    Shapes:
        energy: :math:`(T,)`
    """
    # Temporarily override the global figure size, restoring it afterwards.
    saved_fig_size = plt.rcParams["figure.figsize"]
    if fig_size is not None:
        plt.rcParams["figure.figsize"] = fig_size

    fig, char_axis = plt.subplots()
    tick_positions = np.array(range(len(chars)))
    plt.xticks(tick_positions, chars)
    char_axis.set_xlabel("characters")
    char_axis.set_ylabel("freq")

    # Energy contour on a twin y-axis over the character axis.
    energy_axis = char_axis.twinx()
    energy_axis.plot(energy, linewidth=5.0, color="red")
    energy_axis.set_ylabel("energy")

    plt.rcParams["figure.figsize"] = saved_fig_size
    if not output_fig:
        plt.close()
    return fig
def visualize(
    alignment,
    postnet_output,
    text,
    hop_length,
    CONFIG,
    tokenizer,
    stop_tokens=None,
    decoder_output=None,
    output_path=None,
    figsize=(8, 24),
    output_fig=False,
):
    """Intended to be used in Notebooks.

    Stacks up to four panels in one figure: attention alignment, stopnet
    predictions (if given), the postnet spectrogram, and the decoder
    spectrogram (if given). Optionally saves the figure to ``output_path``.
    """
    # Panel count depends on whether a decoder output was supplied.
    if decoder_output is not None:
        num_plot = 4
    else:
        num_plot = 3

    label_fontsize = 16
    fig = plt.figure(figsize=figsize)

    # Panel 1: attention alignment (decoder steps vs. encoder steps).
    plt.subplot(num_plot, 1, 1)
    plt.imshow(alignment.T, aspect="auto", origin="lower", interpolation=None)
    plt.xlabel("Decoder timestamp", fontsize=label_fontsize)
    plt.ylabel("Encoder timestamp", fontsize=label_fontsize)
    # compute phoneme representation and back
    if CONFIG.use_phonemes:
        seq = tokenizer.text_to_ids(text)
        text = tokenizer.ids_to_text(seq)
        print(text)
    plt.yticks(range(len(text)), list(text))
    plt.colorbar()

    if stop_tokens is not None:
        # Panel 2: plot stopnet predictions.
        plt.subplot(num_plot, 1, 2)
        plt.plot(range(len(stop_tokens)), list(stop_tokens))

    # Panel 3: plot postnet spectrogram.
    plt.subplot(num_plot, 1, 3)
    librosa.display.specshow(
        postnet_output.T,
        sr=CONFIG.audio["sample_rate"],
        hop_length=hop_length,
        x_axis="time",
        y_axis="linear",
        fmin=CONFIG.audio["mel_fmin"],
        fmax=CONFIG.audio["mel_fmax"],
    )
    plt.xlabel("Time", fontsize=label_fontsize)
    plt.ylabel("Hz", fontsize=label_fontsize)
    plt.tight_layout()
    plt.colorbar()

    if decoder_output is not None:
        # Panel 4: decoder (pre-postnet) spectrogram.
        plt.subplot(num_plot, 1, 4)
        librosa.display.specshow(
            decoder_output.T,
            sr=CONFIG.audio["sample_rate"],
            hop_length=hop_length,
            x_axis="time",
            y_axis="linear",
            fmin=CONFIG.audio["mel_fmin"],
            fmax=CONFIG.audio["mel_fmax"],
        )
        plt.xlabel("Time", fontsize=label_fontsize)
        plt.ylabel("Hz", fontsize=label_fontsize)
        plt.tight_layout()
        plt.colorbar()

    if output_path:
        print(output_path)
        fig.savefig(output_path)
        plt.close()

    if not output_fig:
        plt.close()
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstarcsort.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/compat.h>
#include <fst/flags.h>
DEFINE_string(sort_type, "ilabel",
"Comparison method, one of: \"ilabel\", \"olabel\"");
int fstarcsort_main(int argc, char **argv);
// Thin entry point: all of the work happens in fstarcsort_main().
int main(int argc, char **argv) {
  return fstarcsort_main(argc, argv);
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/edit-fst.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// An FST implementation that allows non-destructive edit operations on an
// existing FST.
//
// The EditFst class enables non-destructive edit operations on a wrapped
// ExpandedFst. The implementation uses copy-on-write semantics at the node
// level: if a user has an underlying fst on which he or she wants to perform a
// relatively small number of edits (read: mutations), then this implementation
// will copy the edited node to an internal MutableFst and perform any edits in
// situ on that copied node. This class supports all the methods of MutableFst
// except for DeleteStates(const std::vector<StateId> &); thus, new nodes may
// also be
// added, and one may add transitions from existing nodes of the wrapped fst to
// new nodes.
//
// N.B.: The documentation for Fst::Copy(true) says that its behavior is
// undefined if invoked on an fst that has already been accessed. This class
// requires that the Fst implementation it wraps provides consistent, reliable
// behavior when its Copy(true) method is invoked, where consistent means
// the graph structure, graph properties and state numbering and do not change.
// VectorFst and CompactFst, for example, are both well-behaved in this regard.
#ifndef FST_EDIT_FST_H_
#define FST_EDIT_FST_H_
#include <string>
#include <unordered_map>
#include <vector>
#include <fst/log.h>
#include <fst/cache.h>
namespace fst {
namespace internal {
// The EditFstData class is a container for all mutable data for EditFstImpl;
// also, this class provides most of the actual implementation of what EditFst
// does (that is, most of EditFstImpl's methods delegate to methods in this, the
// EditFstData class). Instances of this class are reference-counted and can be
// shared between otherwise independent EditFstImpl instances. This scheme
// allows EditFstImpl to implement the thread-safe, copy-on-write semantics
// required by Fst::Copy(true).
//
// template parameters:
// A the type of arc to use
// WrappedFstT the type of fst wrapped by the EditFst instance that
// this EditFstData instance is backing
// MutableFstT the type of mutable fst to use internally for edited states;
// crucially, MutableFstT::Copy(false) *must* yield an fst that is
// thread-safe for reading (VectorFst, for example, has this property)
template <typename Arc, typename WrappedFstT = ExpandedFst<Arc>,
          typename MutableFstT = VectorFst<Arc>>
class EditFstData {
 public:
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  EditFstData() : num_new_states_(0) {}

  EditFstData(const EditFstData &other)
      : edits_(other.edits_),
        external_to_internal_ids_(other.external_to_internal_ids_),
        edited_final_weights_(other.edited_final_weights_),
        num_new_states_(other.num_new_states_) {}

  ~EditFstData() {}

  // Deserializes an EditFstData from strm; returns nullptr on failure.
  static EditFstData<Arc, WrappedFstT, MutableFstT> *Read(
      std::istream &strm, const FstReadOptions &opts);

  // Serializes the edits_ fst followed by the id and final-weight maps.
  bool Write(std::ostream &strm, const FstWriteOptions &opts) const {
    // Serialize all private data members of this class.
    FstWriteOptions edits_opts(opts);
    edits_opts.write_header = true;  // Force writing contained header.
    edits_.Write(strm, edits_opts);
    WriteType(strm, external_to_internal_ids_);
    WriteType(strm, edited_final_weights_);
    WriteType(strm, num_new_states_);
    if (!strm) {
      LOG(ERROR) << "EditFstData::Write: Write failed: " << opts.source;
      return false;
    }
    return true;
  }

  StateId NumNewStates() const { return num_new_states_; }

  // accessor methods for the fst holding edited states
  StateId EditedStart() const { return edits_.Start(); }

  // Returns the final weight of s: the locally edited weight if present,
  // else the edited state's weight, else the wrapped fst's weight.
  Weight Final(StateId s, const WrappedFstT *wrapped) const {
    auto final_weight_it = GetFinalWeightIterator(s);
    if (final_weight_it == NotInFinalWeightMap()) {
      auto it = GetEditedIdMapIterator(s);
      return it == NotInEditedMap() ? wrapped->Final(s)
                                    : edits_.Final(it->second);
    } else {
      return final_weight_it->second;
    }
  }

  // The Num* accessors below consult edits_ for edited states and fall back
  // to the wrapped fst otherwise.
  size_t NumArcs(StateId s, const WrappedFstT *wrapped) const {
    auto it = GetEditedIdMapIterator(s);
    return it == NotInEditedMap() ? wrapped->NumArcs(s)
                                  : edits_.NumArcs(it->second);
  }

  size_t NumInputEpsilons(StateId s, const WrappedFstT *wrapped) const {
    auto it = GetEditedIdMapIterator(s);
    return it == NotInEditedMap() ? wrapped->NumInputEpsilons(s)
                                  : edits_.NumInputEpsilons(it->second);
  }

  size_t NumOutputEpsilons(StateId s, const WrappedFstT *wrapped) const {
    auto it = GetEditedIdMapIterator(s);
    return it == NotInEditedMap() ? wrapped->NumOutputEpsilons(s)
                                  : edits_.NumOutputEpsilons(it->second);
  }

  void SetEditedProperties(uint64 props, uint64 mask) {
    edits_.SetProperties(props, mask);
  }

  // Non-const MutableFst operations.

  // Sets the start state for this FST.
  void SetStart(StateId s) { edits_.SetStart(s); }

  // Sets the final state for this FST. Returns the previous final weight.
  Weight SetFinal(StateId s, Weight w, const WrappedFstT *wrapped) {
    Weight old_weight = Final(s, wrapped);
    auto it = GetEditedIdMapIterator(s);
    // If we haven't already edited state s, don't add it to edited_ (which can
    // be expensive if s has many transitions); just use the
    // edited_final_weights_ map.
    if (it == NotInEditedMap()) {
      edited_final_weights_[s] = w;
    } else {
      edits_.SetFinal(GetEditableInternalId(s, wrapped), w);
    }
    return old_weight;
  }

  // Adds a new state to this FST, initially with no arcs.
  StateId AddState(StateId curr_num_states) {
    StateId internal_state_id = edits_.AddState();
    StateId external_state_id = curr_num_states;
    external_to_internal_ids_[external_state_id] = internal_state_id;
    num_new_states_++;
    return external_state_id;
  }

  // Adds the specified arc to the specified state of this FST. Returns a
  // pointer to what was the last arc of s in edits_ before the addition
  // (nullptr if the edited copy of s had no arcs).
  const Arc *AddArc(StateId s, const Arc &arc, const WrappedFstT *wrapped) {
    const auto internal_id = GetEditableInternalId(s, wrapped);
    const auto num_arcs = edits_.NumArcs(internal_id);
    ArcIterator<MutableFstT> arc_it(edits_, internal_id);
    const Arc *prev_arc = nullptr;
    if (num_arcs > 0) {
      // grab the final arc associated with this state in edits_
      arc_it.Seek(num_arcs - 1);
      prev_arc = &(arc_it.Value());
    }
    edits_.AddArc(internal_id, arc);
    return prev_arc;
  }

  // Discards all edits and all new states.
  void DeleteStates() {
    edits_.DeleteStates();
    num_new_states_ = 0;
    external_to_internal_ids_.clear();
    edited_final_weights_.clear();
  }

  // Removes all but the first n outgoing arcs of the specified state.
  void DeleteArcs(StateId s, size_t n, const WrappedFstT *wrapped) {
    edits_.DeleteArcs(GetEditableInternalId(s, wrapped), n);
  }

  // Removes all outgoing arcs from the specified state.
  void DeleteArcs(StateId s, const WrappedFstT *wrapped) {
    edits_.DeleteArcs(GetEditableInternalId(s, wrapped));
  }

  // End methods for non-const MutableFst operations.

  // Provides information for the generic arc iterator.
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data,
                       const WrappedFstT *wrapped) const {
    auto id_map_it = GetEditedIdMapIterator(s);
    if (id_map_it == NotInEditedMap()) {
      VLOG(3) << "EditFstData::InitArcIterator: iterating on state " << s
              << " of original fst";
      wrapped->InitArcIterator(s, data);
    } else {
      VLOG(2) << "EditFstData::InitArcIterator: iterating on edited state " << s
              << " (internal state id: " << id_map_it->second << ")";
      edits_.InitArcIterator(id_map_it->second, data);
    }
  }

  // Provides information for the generic mutable arc iterator. Note that
  // this makes state s editable (triggering the copy-on-write path) even if
  // the caller never mutates an arc.
  void InitMutableArcIterator(StateId s, MutableArcIteratorData<Arc> *data,
                              const WrappedFstT *wrapped) {
    data->base = new MutableArcIterator<MutableFstT>(
        &edits_, GetEditableInternalId(s, wrapped));
  }

  // Prints out the map from external to internal state id's (for debugging
  // purposes).
  void PrintMap() {
    for (auto map_it = external_to_internal_ids_.begin();
         map_it != NotInEditedMap(); ++map_it) {
      LOG(INFO) << "(external,internal)=(" << map_it->first << ","
                << map_it->second << ")";
    }
  }

 private:
  // Returns the iterator of the map from external to internal state id's
  // of edits_ for the specified external state id.
  typename std::unordered_map<StateId, StateId>::const_iterator
  GetEditedIdMapIterator(StateId s) const {
    return external_to_internal_ids_.find(s);
  }

  typename std::unordered_map<StateId, StateId>::const_iterator
  NotInEditedMap() const {
    return external_to_internal_ids_.end();
  }

  typename std::unordered_map<StateId, Weight>::const_iterator
  GetFinalWeightIterator(StateId s) const {
    return edited_final_weights_.find(s);
  }

  typename std::unordered_map<StateId, Weight>::const_iterator
  NotInFinalWeightMap() const {
    return edited_final_weights_.end();
  }

  // Returns the internal state ID of the specified external ID if the state
  // has already been made editable, or else copies the state from wrapped_ to
  // edits_ and returns the state id of the newly editable state in edits_.
  StateId GetEditableInternalId(StateId s, const WrappedFstT *wrapped) {
    auto id_map_it = GetEditedIdMapIterator(s);
    if (id_map_it == NotInEditedMap()) {
      StateId new_internal_id = edits_.AddState();
      VLOG(2) << "EditFstData::GetEditableInternalId: editing state " << s
              << " of original fst; new internal state id:" << new_internal_id;
      external_to_internal_ids_[s] = new_internal_id;
      for (ArcIterator<Fst<Arc>> arc_iterator(*wrapped, s);
           !arc_iterator.Done(); arc_iterator.Next()) {
        edits_.AddArc(new_internal_id, arc_iterator.Value());
      }
      // Copies the final weight; a pending weight-only edit is migrated into
      // edits_ and removed from the map.
      auto final_weight_it = GetFinalWeightIterator(s);
      if (final_weight_it == NotInFinalWeightMap()) {
        edits_.SetFinal(new_internal_id, wrapped->Final(s));
      } else {
        edits_.SetFinal(new_internal_id, final_weight_it->second);
        edited_final_weights_.erase(s);
      }
      return new_internal_id;
    } else {
      return id_map_it->second;
    }
  }

  // A mutable FST (by default, a VectorFst) to contain new states, and/or
  // copies of states from a wrapped ExpandedFst that have been modified in
  // some way.
  MutableFstT edits_;
  // A mapping from external state IDs to the internal IDs of states that
  // appear in edits_.
  std::unordered_map<StateId, StateId> external_to_internal_ids_;
  // A mapping from external state IDs to final state weights assigned to
  // those states. The states in this map are *only* those whose final weight
  // has been modified; if any other part of the state has been modified,
  // the entire state is copied to edits_, and all modifications reside there.
  std::unordered_map<StateId, Weight> edited_final_weights_;
  // The number of new states added to this mutable fst impl, which is <= the
  // number of states in edits_ (since edits_ contains both edited *and* new
  // states).
  StateId num_new_states_;
};
// EditFstData method implementations: just the Read method.
// Deserializes an EditFstData instance; returns nullptr (after logging) on
// failure. The layout mirrors EditFstData::Write: the edits_ fst first,
// then the external-to-internal id map, the edited-final-weight map, and
// the new-state count.
template <typename A, typename WrappedFstT, typename MutableFstT>
EditFstData<A, WrappedFstT, MutableFstT> *
EditFstData<A, WrappedFstT, MutableFstT>::Read(std::istream &strm,
                                               const FstReadOptions &opts) {
  // Hold the result in a unique_ptr so it is not leaked on the error paths
  // below (previously the raw pointer leaked when a read failed).
  std::unique_ptr<EditFstData<A, WrappedFstT, MutableFstT>> data(
      new EditFstData<A, WrappedFstT, MutableFstT>());
  // Next, read in the MutableFstT machine that stores the edits.
  FstReadOptions edits_opts(opts);
  // Contained header was written out, so read it in.
  edits_opts.header = nullptr;
  // Because our internal representation of edited states is a solid object
  // of type MutableFstT (defaults to VectorFst<A>) and not a pointer,
  // and because the static Read method allocates a new object on the heap,
  // we need to call Read, check if there was a failure, use
  // MutableFstT::operator= to assign the object (not the pointer) to the
  // edits_ data member (which will increase the ref count by 1 on the impl)
  // and, finally, delete the heap-allocated object.
  std::unique_ptr<MutableFstT> edits(MutableFstT::Read(strm, edits_opts));
  if (!edits) return nullptr;
  data->edits_ = *edits;
  edits.reset();
  // Finally, reads in the rest of the private data members.
  ReadType(strm, &data->external_to_internal_ids_);
  ReadType(strm, &data->edited_final_weights_);
  ReadType(strm, &data->num_new_states_);
  if (!strm) {
    LOG(ERROR) << "EditFst::Read: read failed: " << opts.source;
    return nullptr;
  }
  return data.release();
}
// This class enables non-destructive edit operations on a wrapped ExpandedFst.
// The implementation uses copy-on-write semantics at the node level: if a user
// has an underlying fst on which they want to perform a relatively small
// number of edits (read: mutations), then this implementation will copy the
// edited node to an internal MutableFst and perform any edits in situ on that
// copied node. This class supports all the methods of MutableFst except for
// DeleteStates(const std::vector<StateId> &); thus, new nodes may also be
// added, and one may add transitions from existing nodes of the wrapped fst
// to new nodes.
//
// Template parameters:
//   A            the type of arc to use.
//   WrappedFstT  the type of fst wrapped by the EditFst instance that
//                this EditFstImpl instance is backing.
//   MutableFstT  the type of mutable fst to use internally for edited states;
//                crucially, MutableFstT::Copy(false) *must* yield an fst that
//                is thread-safe for reading (VectorFst, for example, has this
//                property).
template <typename A, typename WrappedFstT = ExpandedFst<A>,
          typename MutableFstT = VectorFst<A>>
class EditFstImpl : public FstImpl<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  using FstImpl<Arc>::SetProperties;
  using FstImpl<Arc>::SetInputSymbols;
  using FstImpl<Arc>::SetOutputSymbols;
  using FstImpl<Arc>::WriteHeader;

  // Constructs an editable FST implementation with no states. Effectively,
  // this initially-empty fst will in every way mimic the behavior of a
  // VectorFst---more precisely, a VectorFstImpl instance---but with slightly
  // slower performance (by a constant factor), due to the fact that
  // this class maintains a mapping between external state id's and
  // their internal equivalents.
  EditFstImpl() : wrapped_(new MutableFstT()) {
    FstImpl<Arc>::SetType("edit");
    InheritPropertiesFromWrapped();
    data_ = std::make_shared<EditFstData<Arc, WrappedFstT, MutableFstT>>();
  }

  // Wraps the specified ExpandedFst. This constructor requires that the
  // specified Fst is an ExpandedFst instance. This requirement is only
  // enforced at runtime. (See below for the reason.)
  //
  // This library uses the pointer-to-implementation or "PIMPL" design pattern.
  // In particular, to make it convenient to bind an implementation class to
  // its interface, there are a pair of template "binder" classes, one for
  // immutable and one for mutable fst's (ImplToFst and ImplToMutableFst,
  // respectively). As it happens, the API for the ImplToMutableFst<I,F> class
  // requires that the implementation class--the template parameter "I"--have
  // a constructor taking a const Fst<A> reference. Accordingly, the
  // constructor here must perform a static_cast to the WrappedFstT type
  // required by EditFst and therefore EditFstImpl.
  explicit EditFstImpl(const Fst<Arc> &wrapped)
      : wrapped_(static_cast<WrappedFstT *>(wrapped.Copy())) {
    FstImpl<Arc>::SetType("edit");
    data_ = std::make_shared<EditFstData<Arc, WrappedFstT, MutableFstT>>();
    // have edits_ inherit all properties from wrapped_
    data_->SetEditedProperties(wrapped_->Properties(kFstProperties, false),
                               kFstProperties);
    InheritPropertiesFromWrapped();
  }

  // A copy constructor for this implementation class, used to implement
  // the Copy() method of the Fst interface. The wrapped FST is deep-copied
  // (thread-safe copy), while the edit data is shared; MutateCheck() below
  // performs the copy-on-write when a mutation is later requested.
  EditFstImpl(const EditFstImpl &impl)
      : FstImpl<Arc>(),
        wrapped_(static_cast<WrappedFstT *>(impl.wrapped_->Copy(true))),
        data_(impl.data_) {
    SetProperties(impl.Properties());
  }

  // const Fst/ExpandedFst operations, declared in the Fst and ExpandedFst
  // interfaces. Each defers to the edit data first and falls back to the
  // wrapped FST for unedited states.

  // Returns the edited start state if one has been set; otherwise the
  // wrapped FST's start state.
  StateId Start() const {
    const auto edited_start = data_->EditedStart();
    return edited_start == kNoStateId ? wrapped_->Start() : edited_start;
  }

  Weight Final(StateId s) const { return data_->Final(s, wrapped_.get()); }

  size_t NumArcs(StateId s) const { return data_->NumArcs(s, wrapped_.get()); }

  size_t NumInputEpsilons(StateId s) const {
    return data_->NumInputEpsilons(s, wrapped_.get());
  }

  size_t NumOutputEpsilons(StateId s) const {
    return data_->NumOutputEpsilons(s, wrapped_.get());
  }

  // Total state count: wrapped states plus states added via AddState().
  StateId NumStates() const {
    return wrapped_->NumStates() + data_->NumNewStates();
  }

  static EditFstImpl<Arc, WrappedFstT, MutableFstT> *Read(
      std::istream &strm, const FstReadOptions &opts);

  // Serializes this FST: its own header, then the wrapped FST, then the edit
  // data. Returns false if the stream is in a failed state afterwards.
  bool Write(std::ostream &strm, const FstWriteOptions &opts) const {
    FstHeader hdr;
    hdr.SetStart(Start());
    hdr.SetNumStates(NumStates());
    FstWriteOptions header_opts(opts);
    // Allows the contained FST to hold any symbols.
    header_opts.write_isymbols = false;
    header_opts.write_osymbols = false;
    WriteHeader(strm, header_opts, kFileVersion, &hdr);
    // First, serializes the wrapped FST to stream.
    FstWriteOptions wrapped_opts(opts);
    // Forces writing the contained header.
    wrapped_opts.write_header = true;
    wrapped_->Write(strm, wrapped_opts);
    data_->Write(strm, opts);
    strm.flush();
    if (!strm) {
      LOG(ERROR) << "EditFst::Write: Write failed: " << opts.source;
      return false;
    }
    return true;
  }

  // Sets the start state for this FST.
  void SetStart(StateId s) {
    MutateCheck();
    data_->SetStart(s);
    SetProperties(SetStartProperties(FstImpl<Arc>::Properties()));
  }

  // Sets the final weight of the specified state of this fst.
  void SetFinal(StateId s, Weight weight) {
    MutateCheck();
    Weight old_weight = data_->SetFinal(s, weight, wrapped_.get());
    SetProperties(
        SetFinalProperties(FstImpl<Arc>::Properties(), old_weight, weight));
  }

  // Adds a new state to this fst, initially with no arcs.
  StateId AddState() {
    MutateCheck();
    SetProperties(AddStateProperties(FstImpl<Arc>::Properties()));
    return data_->AddState(NumStates());
  }

  // Adds the specified arc to the specified state of this fst.
  void AddArc(StateId s, const Arc &arc) {
    MutateCheck();
    const auto *prev_arc = data_->AddArc(s, arc, wrapped_.get());
    SetProperties(
        AddArcProperties(FstImpl<Arc>::Properties(), s, arc, prev_arc));
  }

  // Deleting an arbitrary subset of states is intentionally unsupported by
  // this class (see class comment); invoking it puts the FST in an error
  // state.
  void DeleteStates(const std::vector<StateId> &dstates) {
    FSTERROR() << ": EditFstImpl::DeleteStates(const std::vector<StateId>&): "
               << " not implemented";
    SetProperties(kError, kError);
  }

  // Deletes all states in this fst.
  void DeleteStates();

  // Removes all but the first n outgoing arcs of the specified state.
  void DeleteArcs(StateId s, size_t n) {
    MutateCheck();
    data_->DeleteArcs(s, n, wrapped_.get());
    SetProperties(DeleteArcsProperties(FstImpl<Arc>::Properties()));
  }

  // Removes all outgoing arcs from the specified state.
  void DeleteArcs(StateId s) {
    MutateCheck();
    data_->DeleteArcs(s, wrapped_.get());
    SetProperties(DeleteArcsProperties(FstImpl<Arc>::Properties()));
  }

  // No-op: capacity reservation hints are not used by the edit structures.
  void ReserveStates(StateId s) {}

  // No-op: capacity reservation hints are not used by the edit structures.
  void ReserveArcs(StateId s, size_t n) {}

  // Ends non-const MutableFst operations.

  // Provides information for the generic state iterator.
  void InitStateIterator(StateIteratorData<Arc> *data) const {
    data->base = nullptr;
    data->nstates = NumStates();
  }

  // Provides information for the generic arc iterator.
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const {
    data_->InitArcIterator(s, data, wrapped_.get());
  }

  // Provides information for the generic mutable arc iterator; triggers
  // copy-on-write since the caller may mutate arcs through it.
  void InitMutableArcIterator(StateId s, MutableArcIteratorData<Arc> *data) {
    MutateCheck();
    data_->InitMutableArcIterator(s, data, wrapped_.get());
  }

 private:
  // Properties always true of this FST class.
  static constexpr uint64 kStaticProperties = kExpanded | kMutable;
  // Current file format version.
  static constexpr int kFileVersion = 2;
  // Minimum file format version supported
  static constexpr int kMinFileVersion = 2;

  // Causes this FST to inherit all the properties from its wrapped FST, except
  // for the two properties that always apply to EditFst instances: kExpanded
  // and kMutable.
  void InheritPropertiesFromWrapped() {
    SetProperties(wrapped_->Properties(kCopyProperties, false) |
                  kStaticProperties);
    SetInputSymbols(wrapped_->InputSymbols());
    SetOutputSymbols(wrapped_->OutputSymbols());
  }

  // This method ensures that any operations that alter the mutable data
  // portion of this EditFstImpl cause the data_ member to be copied when its
  // reference count is greater than 1. Note that this method is distinct from
  // MutableFst::Mutate, which gets invoked whenever one of the basic mutation
  // methods defined in MutableFst is invoked, such as SetInputSymbols.
  // The MutateCheck here in EditFstImpl is invoked whenever one of the
  // mutating methods specifically related to the types of edits provided
  // by EditFst is performed, such as changing an arc of an existing state
  // of the wrapped fst via a MutableArcIterator, or adding a new state via
  // AddState().
  void MutateCheck() {
    if (!data_.unique()) {
      data_ =
          std::make_shared<EditFstData<Arc, WrappedFstT, MutableFstT>>(*data_);
    }
  }

  // The FST that this FST wraps. The purpose of this class is to enable
  // non-destructive edits on this wrapped FST.
  std::unique_ptr<const WrappedFstT> wrapped_;
  // The mutable data for this EditFst instance, with delegates for all the
  // methods that can mutate data.
  std::shared_ptr<EditFstData<Arc, WrappedFstT, MutableFstT>> data_;
};
// Out-of-class definitions for the static constexpr data members declared
// above; prior to C++17's inline variables these are required whenever the
// members are ODR-used (e.g. bound to a reference).
template <typename Arc, typename WrappedFstT, typename MutableFstT>
constexpr uint64 EditFstImpl<Arc, WrappedFstT, MutableFstT>::kStaticProperties;

template <typename Arc, typename WrappedFstT, typename MutableFstT>
constexpr int EditFstImpl<Arc, WrappedFstT, MutableFstT>::kFileVersion;

template <typename Arc, typename WrappedFstT, typename MutableFstT>
constexpr int EditFstImpl<Arc, WrappedFstT, MutableFstT>::kMinFileVersion;
// Deletes all states: discards every recorded edit, then swaps the wrapped
// FST for a freshly constructed empty one---exactly what the default
// constructor would produce---and finally updates the property bits.
template <typename Arc, typename WrappedFstT, typename MutableFstT>
inline void EditFstImpl<Arc, WrappedFstT, MutableFstT>::DeleteStates() {
  data_->DeleteStates();
  wrapped_.reset(new MutableFstT());
  FstImpl<Arc>::SetProperties(DeleteAllStatesProperties(
      FstImpl<Arc>::Properties(), kStaticProperties));
}
// Deserializes an EditFstImpl from the stream: reads this class's header,
// then the wrapped FST, then the edit data. Returns nullptr on any failure.
//
// Fix: the implementation object is now held in a std::unique_ptr so that it
// is destroyed on the early-return error paths; the original leaked the
// freshly allocated impl whenever the header, wrapped FST, or edit data
// failed to read.
template <typename Arc, typename WrappedFstT, typename MutableFstT>
EditFstImpl<Arc, WrappedFstT, MutableFstT> *
EditFstImpl<Arc, WrappedFstT, MutableFstT>::Read(std::istream &strm,
                                                 const FstReadOptions &opts) {
  std::unique_ptr<EditFstImpl> impl(new EditFstImpl());
  FstHeader hdr;
  if (!impl->ReadHeader(strm, opts, kMinFileVersion, &hdr)) return nullptr;
  impl->SetStart(hdr.Start());
  // Reads in wrapped FST.
  FstReadOptions wrapped_opts(opts);
  // Contained header was written out, so reads it in too.
  wrapped_opts.header = nullptr;
  std::unique_ptr<Fst<Arc>> wrapped_fst(Fst<Arc>::Read(strm, wrapped_opts));
  if (!wrapped_fst) return nullptr;
  impl->wrapped_.reset(static_cast<WrappedFstT *>(wrapped_fst.release()));
  impl->data_ = std::shared_ptr<EditFstData<Arc, WrappedFstT, MutableFstT>>(
      EditFstData<Arc, WrappedFstT, MutableFstT>::Read(strm, opts));
  if (!impl->data_) return nullptr;
  // Ownership passes to the caller only on full success.
  return impl.release();
}
} // namespace internal
// Concrete, editable FST. This class attaches the interface to the
// implementation (PIMPL); all edit state lives in internal::EditFstImpl.
template <typename A, typename WrappedFstT = ExpandedFst<A>,
          typename MutableFstT = VectorFst<A>>
class EditFst : public ImplToMutableFst<
                    internal::EditFstImpl<A, WrappedFstT, MutableFstT>> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;

  using Impl = internal::EditFstImpl<Arc, WrappedFstT, MutableFstT>;

  friend class MutableArcIterator<EditFst<Arc, WrappedFstT, MutableFstT>>;

  // Constructs an empty editable FST.
  EditFst() : ImplToMutableFst<Impl>(std::make_shared<Impl>()) {}

  // Wraps an arbitrary Fst; it must actually be an ExpandedFst instance
  // (checked at runtime inside the impl).
  explicit EditFst(const Fst<Arc> &fst)
      : ImplToMutableFst<Impl>(std::make_shared<Impl>(fst)) {}

  // Wraps an FST of the exact wrapped type.
  explicit EditFst(const WrappedFstT &fst)
      : ImplToMutableFst<Impl>(std::make_shared<Impl>(fst)) {}

  // See Fst<>::Copy() for doc.
  EditFst(const EditFst<Arc, WrappedFstT, MutableFstT> &fst, bool safe = false)
      : ImplToMutableFst<Impl>(fst, safe) {}

  ~EditFst() override {}

  // Gets a copy of this EditFst. See Fst<>::Copy() for further doc.
  EditFst<Arc, WrappedFstT, MutableFstT> *Copy(
      bool safe = false) const override {
    return new EditFst<Arc, WrappedFstT, MutableFstT>(*this, safe);
  }

  EditFst<Arc, WrappedFstT, MutableFstT> &operator=(
      const EditFst<Arc, WrappedFstT, MutableFstT> &fst) {
    SetImpl(fst.GetSharedImpl());
    return *this;
  }

  EditFst<Arc, WrappedFstT, MutableFstT> &operator=(
      const Fst<Arc> &fst) override {
    SetImpl(std::make_shared<Impl>(fst));
    return *this;
  }

  // Reads an EditFst from an input stream, returning nullptr on error.
  //
  // Fix: constructs EditFst<Arc, WrappedFstT, MutableFstT> rather than
  // EditFst<Arc>. The original dropped the WrappedFstT/MutableFstT arguments,
  // which does not match the declared return type and fails to compile when
  // this method is instantiated with non-default template arguments (the
  // file-based Read() below already propagated all three arguments).
  static EditFst<Arc, WrappedFstT, MutableFstT> *Read(
      std::istream &strm, const FstReadOptions &opts) {
    auto *impl = Impl::Read(strm, opts);
    return impl ? new EditFst<Arc, WrappedFstT, MutableFstT>(
                      std::shared_ptr<Impl>(impl))
                : nullptr;
  }

  // Reads an EditFst from a file, returning nullptr on error. If the filename
  // argument is an empty string, it reads from standard input.
  static EditFst<Arc, WrappedFstT, MutableFstT> *Read(const string &filename) {
    auto *impl = ImplToExpandedFst<Impl, MutableFst<Arc>>::Read(filename);
    return impl ? new EditFst<Arc, WrappedFstT, MutableFstT>(
                      std::shared_ptr<Impl>(impl))
                : nullptr;
  }

  // Serializes this FST to the stream; returns false on failure.
  bool Write(std::ostream &strm, const FstWriteOptions &opts) const override {
    return GetImpl()->Write(strm, opts);
  }

  // Serializes this FST to the named file; returns false on failure.
  bool Write(const string &filename) const override {
    return Fst<Arc>::WriteFile(filename);
  }

  void InitStateIterator(StateIteratorData<Arc> *data) const override {
    GetImpl()->InitStateIterator(data);
  }

  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
    GetImpl()->InitArcIterator(s, data);
  }

  void InitMutableArcIterator(StateId s,
                              MutableArcIteratorData<A> *data) override {
    GetMutableImpl()->InitMutableArcIterator(s, data);
  }

 private:
  // Adopts an already-constructed implementation (used by the Read factories).
  explicit EditFst(std::shared_ptr<Impl> impl) : ImplToMutableFst<Impl>(impl) {}

  using ImplToFst<Impl, MutableFst<Arc>>::GetImpl;
  using ImplToFst<Impl, MutableFst<Arc>>::GetMutableImpl;
  using ImplToFst<Impl, MutableFst<Arc>>::SetImpl;
};
} // namespace fst
#endif // FST_EDIT_FST_H_
| 0 |
coqui_public_repos | coqui_public_repos/STT/ds_generic.supp | {
libgomp_malloc
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
obj:/usr/lib/*/libgomp.so.1.0.0
}
| 0 |
coqui_public_repos/STT-models/catalan/ccoreilly | coqui_public_repos/STT-models/catalan/ccoreilly/v0.14.0/alphabet.txt | # Each line in this file represents the Unicode codepoint (UTF-8 encoded)
# associated with a numeric label.
# A line that starts with # is a comment. You can escape it with \# if you wish
# to use '#' as a label.
a
à
b
c
ç
d
e
è
é
f
g
h
i
í
ï
j
k
l
m
n
o
ò
ó
p
q
r
s
t
u
ú
ü
v
w
x
y
z
'
-
·
# The last (non-comment) line needs to end with a newline.
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/far/farextract.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Extracts component FSTs from an finite-state archive.
#include <string>
#include <vector>
#include <fst/flags.h>
#include <fst/extensions/far/farscript.h>
#include <fst/extensions/far/getters.h>
DEFINE_string(filename_prefix, "", "Prefix to append to filenames");
// Command-line flags controlling which archive entries are extracted and how
// the output files are named.
DEFINE_string(filename_prefix, "", "Prefix to append to filenames");
DEFINE_string(filename_suffix, "", "Suffix to append to filenames");
DEFINE_int32(generate_filenames, 0,
             "Generate N digit numeric filenames (def: use keys)");
DEFINE_string(keys, "",
              "Extract set of keys separated by comma (default) "
              "including ranges delimited by dash (default)");
DEFINE_string(key_separator, ",", "Separator for individual keys");
DEFINE_string(range_delimiter, "-", "Delimiter for ranges of keys");
// Entry point: parses flags, determines the arc type from the first input
// archive, and delegates the actual extraction to fst::script::FarExtract.
int main(int argc, char **argv) {
  namespace s = fst::script;

  string usage = "Extracts FSTs from a finite-state archive.\n\n  Usage:";
  usage += argv[0];
  usage += " [in1.far in2.far...]\n";

  std::set_new_handler(FailedNewHandler);
  SET_FLAGS(usage.c_str(), &argc, &argv, true);
  s::ExpandArgs(argc, argv, &argc, &argv);

  // Remaining positional arguments are the input archives; an empty string
  // means "read from standard input".
  std::vector<string> in_fnames;
  for (int i = 1; i < argc; ++i) in_fnames.push_back(argv[i]);
  if (in_fnames.empty()) in_fnames.push_back("");

  // The arc type is taken from the first archive's header; all inputs are
  // expected to share it.
  const auto arc_type = s::LoadArcTypeFromFar(in_fnames[0]);
  if (arc_type.empty()) return 1;

  s::FarExtract(in_fnames, arc_type, FLAGS_generate_filenames, FLAGS_keys,
                FLAGS_key_separator, FLAGS_range_delimiter,
                FLAGS_filename_prefix, FLAGS_filename_suffix);

  return 0;
}
| 0 |
coqui_public_repos | coqui_public_repos/xtts-streaming-server/LICENSE | Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/far/script-impl.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Definitions and functions for invoking and using Far main functions that
// support multiple and extensible arc types.
#include <fst/extensions/far/script-impl.h>
#include <string>
#include <fst/extensions/far/far.h>
#include <fstream>
namespace fst {
namespace script {
// Returns the arc type recorded in the header of the given FAR file, or the
// empty string (after logging) if the header cannot be read or the archive
// contains no FSTs.
string LoadArcTypeFromFar(const string &far_fname) {
  FarHeader hdr;
  if (!hdr.Read(far_fname)) {
    LOG(ERROR) << "Error reading FAR: " << far_fname;
    return "";
  }
  const string atype = hdr.ArcType();
  if (atype != "unknown") return atype;
  LOG(ERROR) << "Empty FST archive: " << far_fname;
  return "";
}
// Returns the arc type recorded in the header of the given FST file, or the
// empty string (after logging) on a read failure.
string LoadArcTypeFromFst(const string &fst_fname) {
  std::ifstream in(fst_fname, std::ios_base::in | std::ios_base::binary);
  FstHeader hdr;
  if (hdr.Read(in, fst_fname)) return hdr.ArcType();
  LOG(ERROR) << "Error reading FST: " << fst_fname;
  return "";
}
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/dfs-visit.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Depth-first search visitation. See visit.h for more general search queue
// disciplines.
#ifndef FST_DFS_VISIT_H_
#define FST_DFS_VISIT_H_
#include <stack>
#include <vector>
#include <fst/arcfilter.h>
#include <fst/fst.h>
namespace fst {
// Visitor Interface: class determining actions taken during a depth-first
// search-style visit. If any of the boolean member functions return false, the
// DFS is aborted by first calling FinishState() on all currently grey states
// and then calling FinishVisit().
//
// This is similar to the more general visitor interface in visit.h, except
// that FinishState returns additional information appropriate only for a DFS
// and some methods names here are better suited to a DFS.
//
// template <class Arc>
// class Visitor {
// public:
// using StateId = typename Arc::StateId;
//
// Visitor(T *return_data);
//
// // Invoked before DFS visit.
// void InitVisit(const Fst<Arc> &fst);
//
// // Invoked when state discovered (2nd arg is DFS tree root).
// bool InitState(StateId s, StateId root);
//
// // Invoked when tree arc to white/undiscovered state examined.
// bool TreeArc(StateId s, const Arc &arc);
//
// // Invoked when back arc to grey/unfinished state examined.
// bool BackArc(StateId s, const Arc &arc);
//
// // Invoked when forward or cross arc to black/finished state examined.
// bool ForwardOrCrossArc(StateId s, const Arc &arc);
//
// // Invoked when state finished ('s' is tree root, 'parent' is kNoStateId,
// // and 'arc' is nullptr).
// void FinishState(StateId s, StateId parent, const Arc *arc);
//
// // Invoked after DFS visit.
// void FinishVisit();
// };
namespace internal {
// An FST state's DFS stack state: the state id plus the iterator marking how
// far along its arcs the traversal has progressed. Instances are allocated
// from a MemoryPool (see the placement new below) rather than the heap, so
// they must be released with Destroy(), never plain `delete`.
template <class FST>
struct DfsState {
  using Arc = typename FST::Arc;
  using StateId = typename Arc::StateId;
  DfsState(const FST &fst, StateId s) : state_id(s), arc_iter(fst, s) {}
  // Placement new drawing storage from `pool`; pairs with Destroy() below.
  void *operator new(size_t size, MemoryPool<DfsState<FST>> *pool) {
    return pool->Allocate();
  }
  // Runs the destructor explicitly and returns the storage to the pool.
  static void Destroy(DfsState<FST> *dfs_state,
                      MemoryPool<DfsState<FST>> *pool) {
    if (dfs_state) {
      dfs_state->~DfsState<FST>();
      pool->Free(dfs_state);
    }
  }
  StateId state_id;           // FST state.
  ArcIterator<FST> arc_iter;  // The corresponding arcs.
};
} // namespace internal
// Performs depth-first visitation. Visitor class argument determines actions
// and contains any return data. ArcFilter determines arcs that are considered.
// If 'access_only' is true, performs visitation only to states accessible from
// the initial state.
//
// Note this is similar to Visit() in visit.h called with a LIFO queue, except
// this version has a Visitor class specialized and augmented for a DFS.
template <class FST, class Visitor, class ArcFilter>
void DfsVisit(const FST &fst, Visitor *visitor, ArcFilter filter,
              bool access_only = false) {
  using Arc = typename FST::Arc;
  using StateId = typename Arc::StateId;
  visitor->InitVisit(fst);
  const auto start = fst.Start();
  // No start state: the visit is trivially finished.
  if (start == kNoStateId) {
    visitor->FinishVisit();
    return;
  }
  // An FST state's DFS status
  static constexpr uint8 kDfsWhite = 0;  // Undiscovered.
  static constexpr uint8 kDfsGrey = 1;   // Discovered but unfinished.
  static constexpr uint8 kDfsBlack = 2;  // Finished.
  std::vector<uint8> state_color;
  std::stack<internal::DfsState<FST> *> state_stack;  // DFS execution stack.
  MemoryPool<internal::DfsState<FST>> state_pool;     // Pool for DFSStates.
  auto nstates = start + 1;  // Number of known states in general case.
  bool expanded = false;
  if (fst.Properties(kExpanded, false)) {  // Tests if expanded case, then
    nstates = CountStates(fst);            // uses ExpandedFst::NumStates().
    expanded = true;
  }
  state_color.resize(nstates, kDfsWhite);
  StateIterator<FST> siter(fst);
  // Continue DFS while true; any visitor callback returning false clears
  // this flag, which drains the stack (calling FinishState on grey states)
  // and then ends the visit.
  bool dfs = true;
  // Iterate over trees in DFS forest.
  for (auto root = start; dfs && root < nstates;) {
    state_color[root] = kDfsGrey;
    state_stack.push(new (&state_pool) internal::DfsState<FST>(fst, root));
    dfs = visitor->InitState(root, root);
    while (!state_stack.empty()) {
      auto *dfs_state = state_stack.top();
      const auto s = dfs_state->state_id;
      // For lazily-expanded FSTs, states may be discovered beyond the
      // current color table; grow it on demand.
      if (s >= state_color.size()) {
        nstates = s + 1;
        state_color.resize(nstates, kDfsWhite);
      }
      ArcIterator<FST> &aiter = dfs_state->arc_iter;
      if (!dfs || aiter.Done()) {
        // All arcs of s examined (or visit aborted): finish the state and
        // resume its parent's arc iteration.
        state_color[s] = kDfsBlack;
        internal::DfsState<FST>::Destroy(dfs_state, &state_pool);
        state_stack.pop();
        if (!state_stack.empty()) {
          auto *parent_state = state_stack.top();
          auto &piter = parent_state->arc_iter;
          visitor->FinishState(s, parent_state->state_id, &piter.Value());
          piter.Next();
        } else {
          visitor->FinishState(s, kNoStateId, nullptr);
        }
        continue;
      }
      const auto &arc = aiter.Value();
      if (arc.nextstate >= state_color.size()) {
        nstates = arc.nextstate + 1;
        state_color.resize(nstates, kDfsWhite);
      }
      // Filtered-out arcs are skipped without any visitor callback.
      if (!filter(arc)) {
        aiter.Next();
        continue;
      }
      // Dispatch on the target's color: white = tree arc (descend),
      // grey = back arc, black = forward/cross arc.
      const auto next_color = state_color[arc.nextstate];
      switch (next_color) {
        default:
        case kDfsWhite:
          dfs = visitor->TreeArc(s, arc);
          if (!dfs) break;
          state_color[arc.nextstate] = kDfsGrey;
          state_stack.push(new (&state_pool)
                               internal::DfsState<FST>(fst, arc.nextstate));
          dfs = visitor->InitState(arc.nextstate, root);
          break;
        case kDfsGrey:
          dfs = visitor->BackArc(s, arc);
          aiter.Next();
          break;
        case kDfsBlack:
          dfs = visitor->ForwardOrCrossArc(s, arc);
          aiter.Next();
          break;
      }
    }
    if (access_only) break;
    // Finds next tree root.
    for (root = root == start ? 0 : root + 1;
         root < nstates && state_color[root] != kDfsWhite; ++root) {
    }
    // Checks for a state beyond the largest known state.
    if (!expanded && root == nstates) {
      for (; !siter.Done(); siter.Next()) {
        if (siter.Value() == nstates) {
          ++nstates;
          state_color.push_back(kDfsWhite);
          break;
        }
      }
    }
  }
  visitor->FinishVisit();
}
// Convenience overload: visits the whole FST considering every arc
// (AnyArcFilter, access_only defaults to false).
template <class Arc, class Visitor>
void DfsVisit(const Fst<Arc> &fst, Visitor *visitor) {
  DfsVisit(fst, visitor, AnyArcFilter<Arc>());
}
} // namespace fst
#endif // FST_DFS_VISIT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/const/const8-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/fst.h>
#include <fst/const-fst.h>
namespace fst {
// Static registerers: constructing these objects at program load time adds
// the uint8-indexed ConstFst variants to the FST registry so they can be
// instantiated by type name (e.g. when read from disk).
static FstRegisterer<ConstFst<StdArc, uint8>> ConstFst_StdArc_uint8_registerer;
static FstRegisterer<ConstFst<LogArc, uint8>> ConstFst_LogArc_uint8_registerer;
static FstRegisterer<ConstFst<Log64Arc, uint8>>
    ConstFst_Log64Arc_uint8_registerer;
}  // namespace fst
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-nodejs_15x_16k_multiarchpkg-linux-amd64-opt.yml | build:
template_file: test-linux-opt-base.tyml
docker_image: "ubuntu:16.04"
dependencies:
- "node-package-cpu"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
system_setup:
>
${nodejs.packages_xenial.prep_15} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests.sh 15.x 16k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU NodeJS MultiArch Package 15.x tests (16kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on NodeJS MultiArch Package v15.x, CPU only, optimized version (16kHz)"
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/util/float_to_string.hh | #ifndef UTIL_FLOAT_TO_STRING_H
#define UTIL_FLOAT_TO_STRING_H
// Just for ToStringBuf
#include "util/integer_to_string.hh"
namespace util {
// Maximum buffer size (in bytes) required by ToString(double, char *).
template <> struct ToStringBuf<double> {
  // DoubleToStringConverter::kBase10MaximalLength + 1 for null paranoia.
  static const unsigned kBytes = 19;
};
// Single wasn't documented in double conversion, so be conservative and
// say the same as double.
template <> struct ToStringBuf<float> {
  static const unsigned kBytes = 19;
};
// Formats `value` into the buffer at `to` and returns a pointer past the
// last byte written — presumably the same end-pointer contract as the
// integer overloads in integer_to_string.hh (no NUL terminator); confirm
// against the implementation before relying on termination.
char *ToString(double value, char *to);
char *ToString(float value, char *to);
} // namespace util
#endif // UTIL_FLOAT_TO_STRING_H
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/linear/linearscript.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <cctype>
#include <cstdio>
#include <set>
#include <fst/compat.h>
#include <fst/flags.h>
#include <fst/extensions/linear/linearscript.h>
#include <fst/arc.h>
#include <fstream>
#include <fst/script/script-impl.h>
DEFINE_string(delimiter, "|",
"Single non-white-space character delimiter inside sequences of "
"feature symbols and output symbols");
DEFINE_string(empty_symbol, "<empty>",
"Special symbol that designates an empty sequence");
DEFINE_string(start_symbol, "<s>", "Start of sentence symbol");
DEFINE_string(end_symbol, "</s>", "End of sentence symbol");
DEFINE_bool(classifier, false,
"Treat input model as a classifier instead of a tagger");
namespace fst {
namespace script {
// Returns true iff --delimiter is exactly one non-whitespace character.
bool ValidateDelimiter() {
  // Cast to unsigned char: passing a plain char with a negative value to
  // std::isspace is undefined behavior.
  return FLAGS_delimiter.size() == 1 &&
         !std::isspace(static_cast<unsigned char>(FLAGS_delimiter[0]));
}
// Returns true iff --empty_symbol is non-empty and contains no whitespace
// characters.
bool ValidateEmptySymbol() {
  if (FLAGS_empty_symbol.empty()) return false;
  for (const char c : FLAGS_empty_symbol) {
    // Cast to unsigned char: std::isspace on a negative plain char is
    // undefined behavior. Return as soon as a whitespace character is seen
    // instead of scanning the remainder of the string.
    if (std::isspace(static_cast<unsigned char>(c))) return false;
  }
  return true;
}
// Type-erased entry point for linear-FST compilation: packs the arguments
// and dispatches to the LinearCompileTpl<> operation registered for
// `arc_type` (see REGISTER_FST_LINEAR_OPERATIONS below). `models` is an
// array of `models_len` model source paths; the `save_*` paths presumably
// receive the input/feature/output symbol tables when non-empty — see
// LinearCompileTpl for the exact semantics.
void LinearCompile(const string &arc_type, const string &epsilon_symbol,
                   const string &unknown_symbol, const string &vocab,
                   char **models, int models_len, const string &out,
                   const string &save_isymbols, const string &save_fsymbols,
                   const string &save_osymbols) {
  LinearCompileArgs args(epsilon_symbol, unknown_symbol, vocab, models,
                         models_len, out, save_isymbols, save_fsymbols,
                         save_osymbols);
  Apply<Operation<LinearCompileArgs>>("LinearCompileTpl", arc_type, &args);
}
// Instantiate templates for common arc types
REGISTER_FST_LINEAR_OPERATIONS(StdArc);
REGISTER_FST_LINEAR_OPERATIONS(LogArc);
void SplitByWhitespace(const string &str, std::vector<string> *out) {
out->clear();
std::istringstream strm(str);
string buf;
while (strm >> buf) out->push_back(buf);
}
// Counts the number of distinct prediction classes across all `models_len`
// model source files. In each file the first line is read and discarded
// (presumably a header — see the model file format expected by
// LinearCompile); every subsequent line must contain exactly three
// whitespace-separated fields, the second of which is the class symbol.
// Aborts via LOG(FATAL) on unreadable files or malformed lines.
int ScanNumClasses(char **models, int models_len) {
  std::set<string> preds;  // Distinct class symbols seen so far.
  for (int i = 0; i < models_len; ++i) {
    std::ifstream in(models[i]);
    if (!in) LOG(FATAL) << "Failed to open " << models[i];
    string line;
    // Read and discard the first line of the file.
    std::getline(in, line);
    size_t num_line = 1;
    while (std::getline(in, line)) {
      ++num_line;
      std::vector<string> fields;
      SplitByWhitespace(line, &fields);
      if (fields.size() != 3)
        LOG(FATAL) << "Wrong number of fields in source " << models[i]
                   << ", line " << num_line;
      preds.insert(fields[1]);
    }
  }
  return preds.size();
}
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT/doc | coqui_public_repos/STT/doc/playbook/DEEPSPEECH.md | [Home](README.md) | [Previous - Introduction](INTRO.md) | [Next - Formatting your training data](DATA_FORMATTING.md)
# About DeepSpeech
## Contents
- [About DeepSpeech](#about-deepspeech)
* [Contents](#contents)
* [What does DeepSpeech do?](#what-does-deepspeech-do-)
* [How does DeepSpeech work?](#how-does-deepspeech-work-)
* [How is DeepSpeech implemented?](#how-is-deepspeech-implemented-)
## What does DeepSpeech do?
DeepSpeech is a tool for automatically transcribing spoken audio. DeepSpeech takes digital audio as input and returns a "most likely" text transcript of that audio.
DeepSpeech is an implementation of the DeepSpeech algorithm developed by Baidu and presented in this research paper:
> Hannun, A., Case, C., Casper, J., Catanzaro, B., Diamos, G., Elsen, E., Prenger R, Satheesh S, Sengupta S, Coates A., & Ng, A. Y. (2014). Deep speech: Scaling up end-to-end speech recognition. [arXiv preprint arXiv:1412.5567](https://arxiv.org/pdf/1412.5567).
DeepSpeech can be used for two key activities related to speech recognition - _training_ and _inference_. Speech recognition _inference_ - the process of converting spoken audio to written text - relies on a _trained model_. DeepSpeech can be used, with appropriate hardware (GPU) to train a model using a set of voice data, known as a _corpus_. Then, _inference_ or _recognition_ can be performed using the trained model. DeepSpeech includes several pre-trained models.
**This Playbook is focused on helping you train your own model.**
## How does DeepSpeech work?
DeepSpeech takes a stream of audio as input, and converts that stream of audio into a sequence of characters in the designated alphabet. This conversion is made possible by two basic steps: First, the audio is converted into a sequence of probabilities over characters in the alphabet. Secondly, this sequence of probabilities is converted into a sequence of characters.
The first step is made possible by a [Deep Neural Network](https://en.wikipedia.org/wiki/Deep_learning#Deep_neural_networks), and the second step is made possible by an [N-gram](https://en.wikipedia.org/wiki/N-gram) language model. The neural network is trained on audio and corresponding text transcripts, and the N-gram language model is trained on a text corpus (which is often different from the text transcripts of the audio). The neural model is trained to predict the text from speech, and the language model is trained to predict text from preceding text. At a very high level, you can think of the first part (the acoustic model) as a phonetic transcriber, and the second part (the language model) as a spelling and grammar checker.
## How is DeepSpeech implemented?
The core of DeepSpeech is written in C++, but it has bindings to Python, .NET, Java, JavaScript, and community-based bindings for Golang, Rust, Vlang, and NIM-lang.
---
[Home](README.md) | [Previous - Introduction](INTRO.md) | [Next - Formatting your training data](DATA_FORMATTING.md)
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-electronjs_v7.0_16k-linux-amd64-opt.yml | build:
template_file: test-linux-opt-base.tyml
docker_image: "ubuntu:16.04"
dependencies:
- "linux-amd64-cpu-opt"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
system_setup:
>
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} ${electronjs.packages_xenial.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 7.0.1 16k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU ElectronJS v7.0 tests (16kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on ElectronJS v7.0, CPU only, optimized version (16kHz)"
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/framework/ml_value.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <string>
#ifndef SHARED_PROVIDER
#include "core/common/common.h"
#include "core/common/exceptions.h"
#include "core/framework/allocator.h"
#include "core/framework/data_types.h"
#include "core/framework/tensor.h"
#include "core/framework/TensorSeq.h"
#endif
namespace onnxruntime {
class SparseTensor;
} // namespace onnxruntime
/**
   Represents both tensors and non-tensors: a type-tagged, type-erased value
   holder. The payload is owned through a std::shared_ptr<void> with a
   caller-supplied deleter; `type_` records its runtime MLDataType.
*/
struct OrtValue {
 public:
  OrtValue() : data_(nullptr) {}
  ~OrtValue() = default;
  // Takes ownership of `pData`; `deleter` runs when the last reference to
  // the shared payload is released.
  OrtValue(void* pData, onnxruntime::MLDataType type, onnxruntime::DeleteFunc deleter) {
    Init(pData, type, deleter);
  }
  // (Re)binds this value to `pData` with the given type and deleter.
  void Init(void* pData, onnxruntime::MLDataType type, onnxruntime::DeleteFunc deleter) {
    data_.reset(pData, deleter);
    type_ = type;
  }
  // Overload accepting an arbitrary callable deleter.
  void Init(void* pData, onnxruntime::MLDataType type, const std::function<void(void*)>& deleter) {
    data_.reset(pData, deleter);
    type_ = type;
  }
  // True once both a payload and a type have been set.
  bool IsAllocated() const {
    return data_ && type_;
  }
  // Typed accessors: enforce an exact MLDataType match before casting.
  // (The Tensor/TensorSeq/SparseTensor specializations below relax this to
  // a container-kind check.)
  template <typename T>
  const T& Get() const {
    ORT_ENFORCE(onnxruntime::DataTypeImpl::GetType<T>() == type_, onnxruntime::DataTypeImpl::GetType<T>(), " != ", type_);
    return *static_cast<T*>(data_.get());
  }
  template <typename T>
  T* GetMutable() {
    ORT_ENFORCE(onnxruntime::DataTypeImpl::GetType<T>() == type_, onnxruntime::DataTypeImpl::GetType<T>(), " != ", type_);
    return static_cast<T*>(data_.get());
  }
  // Runtime kind queries; all return false for an uninitialized value.
  bool IsTensor() const noexcept {
    return (type_ != nullptr && type_->IsTensorType());
  }
  bool IsTensorSequence() const noexcept {
    return (type_ != nullptr && type_->IsTensorSequenceType());
  }
  bool IsSparseTensor() const noexcept {
    return (type_ != nullptr && type_->IsSparseTensorType());
  }
  onnxruntime::MLDataType Type() const {
    return type_;
  }
  // Fence accessors — presumably a synchronization object for asynchronous
  // execution providers; may be null. Confirm semantics against IFence usage.
  onnxruntime::Fence_t Fence() const {
    return fence_.get();
  }
  void SetFence(onnxruntime::FencePtr fence) {
    fence_ = fence;
  }
  void ShareFenceWith(OrtValue& v) {
    fence_ = v.fence_;
  }
 private:
  std::shared_ptr<void> data_;
  onnxruntime::MLDataType type_{nullptr};
  onnxruntime::FencePtr fence_;
};
// Specializations for the container types Tensor, TensorSeq and SparseTensor:
// unlike the primary template (which requires an exact MLDataType match),
// these check only the container kind (IsTensor() etc.), since tensors of any
// element type share one container kind.
template <>
inline const onnxruntime::Tensor& OrtValue::Get<onnxruntime::Tensor>() const {
  ORT_ENFORCE(IsTensor(), "Trying to get a Tensor, but got: ", onnxruntime::DataTypeImpl::ToString(type_));
  return *static_cast<onnxruntime::Tensor*>(data_.get());
}
template <>
inline onnxruntime::Tensor* OrtValue::GetMutable<onnxruntime::Tensor>() {
  ORT_ENFORCE(IsTensor(), "Trying to get a Tensor, but got: ", onnxruntime::DataTypeImpl::ToString(type_));
  return static_cast<onnxruntime::Tensor*>(data_.get());
}
template <>
inline const onnxruntime::TensorSeq& OrtValue::Get<onnxruntime::TensorSeq>() const {
  ORT_ENFORCE(IsTensorSequence(), "Trying to get a TensorSeq, but got: ", onnxruntime::DataTypeImpl::ToString(type_));
  return *static_cast<onnxruntime::TensorSeq*>(data_.get());
}
template <>
inline onnxruntime::TensorSeq* OrtValue::GetMutable<onnxruntime::TensorSeq>() {
  ORT_ENFORCE(IsTensorSequence(), "Trying to get a TensorSeq, but got: ", onnxruntime::DataTypeImpl::ToString(type_));
  return static_cast<onnxruntime::TensorSeq*>(data_.get());
}
template <>
inline const onnxruntime::SparseTensor& OrtValue::Get<onnxruntime::SparseTensor>() const {
  ORT_ENFORCE(IsSparseTensor(), "Trying to get a SparseTensor, but got: ", onnxruntime::DataTypeImpl::ToString(type_));
  return *static_cast<onnxruntime::SparseTensor*>(data_.get());
}
template <>
inline onnxruntime::SparseTensor* OrtValue::GetMutable<onnxruntime::SparseTensor>() {
  ORT_ENFORCE(IsSparseTensor(), "Trying to get a SparseTensor, but got: ", onnxruntime::DataTypeImpl::ToString(type_));
  return static_cast<onnxruntime::SparseTensor*>(data_.get());
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions/linear/trie.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#ifndef FST_EXTENSIONS_LINEAR_TRIE_H_
#define FST_EXTENSIONS_LINEAR_TRIE_H_
#include <unordered_map>
#include <utility>
#include <vector>
#include <fst/compat.h>
#include <fst/util.h>
namespace fst {
const int kNoTrieNodeId = -1;
// Forward declarations of all available trie topologies.
template <class L, class H>
class NestedTrieTopology;
template <class L, class H>
class FlatTrieTopology;
// A pair of parent node id and label, part of a trie edge. Serves as the
// key type of FlatTrieTopology's edge map; default construction leaves the
// members uninitialized.
template <class L>
struct ParentLabel {
  int parent;
  L label;
  ParentLabel() {}
  ParentLabel(int p, L l) : parent(p), label(l) {}
  bool operator==(const ParentLabel &that) const {
    return parent == that.parent && label == that.label;
  }
  // Binary (de)serialization via the ReadType/WriteType protocol in
  // fst/util.h; returns the stream to allow error checking by the caller.
  std::istream &Read(std::istream &strm) {  // NOLINT
    ReadType(strm, &parent);
    ReadType(strm, &label);
    return strm;
  }
  std::ostream &Write(std::ostream &strm) const {  // NOLINT
    WriteType(strm, parent);
    WriteType(strm, label);
    return strm;
  }
};
// Hash functor for ParentLabel: combines the parent node id with the label's
// hash under `H`.
template <class L, class H>
struct ParentLabelHash {
  size_t operator()(const ParentLabel<L> &pl) const {
    // Perform the arithmetic in size_t: the previous form multiplied two
    // ints first, which can overflow (undefined behavior) for large tries.
    return static_cast<size_t>(pl.parent) * 7853 + H()(pl.label);
  }
};
// The trie topology in a nested tree of hash maps; allows efficient
// iteration over children of a specific node. Node ids are dense ints with
// 0 as the root; each node owns a map from edge label to child id.
template <class L, class H>
class NestedTrieTopology {
 public:
  typedef L Label;
  typedef H Hash;
  typedef std::unordered_map<L, int, H> NextMap;
  // Forward iterator over all edges as std::pair<ParentLabel<L>, int>,
  // visiting nodes in id order. Dereferencing materializes the pair into an
  // internal stub, so the reference is invalidated by the next increment.
  class const_iterator {
   public:
    typedef std::forward_iterator_tag iterator_category;
    typedef std::pair<ParentLabel<L>, int> value_type;
    typedef std::ptrdiff_t difference_type;
    typedef const value_type *pointer;
    typedef const value_type &reference;
    friend class NestedTrieTopology<L, H>;
    const_iterator() : ptr_(nullptr), cur_node_(kNoTrieNodeId), cur_edge_() {}
    reference operator*() {
      UpdateStub();
      return stub_;
    }
    pointer operator->() {
      UpdateStub();
      return &stub_;
    }
    const_iterator &operator++();
    const_iterator &operator++(int);  // NOLINT
    bool operator==(const const_iterator &that) const {
      return ptr_ == that.ptr_ && cur_node_ == that.cur_node_ &&
             cur_edge_ == that.cur_edge_;
    }
    bool operator!=(const const_iterator &that) const {
      return !(*this == that);
    }
   private:
    const_iterator(const NestedTrieTopology *ptr, int cur_node)
        : ptr_(ptr), cur_node_(cur_node) {
      SetProperCurEdge();
    }
    // Positions cur_edge_ at the start of the current node's edge map; the
    // end() iterator parks cur_edge_ on node 0's map so comparisons work.
    void SetProperCurEdge() {
      if (cur_node_ < ptr_->NumNodes())
        cur_edge_ = ptr_->nodes_[cur_node_]->begin();
      else
        cur_edge_ = ptr_->nodes_[0]->begin();
    }
    void UpdateStub() {
      stub_.first = ParentLabel<L>(cur_node_, cur_edge_->first);
      stub_.second = cur_edge_->second;
    }
    const NestedTrieTopology *ptr_;
    int cur_node_;
    typename NextMap::const_iterator cur_edge_;
    value_type stub_;
  };
  NestedTrieTopology();
  NestedTrieTopology(const NestedTrieTopology &that);
  ~NestedTrieTopology();
  void swap(NestedTrieTopology &that);
  NestedTrieTopology &operator=(const NestedTrieTopology &that);
  bool operator==(const NestedTrieTopology &that) const;
  bool operator!=(const NestedTrieTopology &that) const;
  int Root() const { return 0; }
  size_t NumNodes() const { return nodes_.size(); }
  int Insert(int parent, const L &label);
  int Find(int parent, const L &label) const;
  const NextMap &ChildrenOf(int parent) const { return *nodes_[parent]; }
  std::istream &Read(std::istream &strm);         // NOLINT
  std::ostream &Write(std::ostream &strm) const;  // NOLINT
  const_iterator begin() const { return const_iterator(this, 0); }
  const_iterator end() const { return const_iterator(this, NumNodes()); }
 private:
  std::vector<NextMap *>
      nodes_;  // Use pointers to avoid copying the maps when the
               // vector grows
};
// Default construction creates the root node (id 0) with an empty edge map.
template <class L, class H>
NestedTrieTopology<L, H>::NestedTrieTopology() {
  nodes_.push_back(new NextMap);
}
// Copy construction deep-copies every per-node edge map (the maps are held
// by pointer, so a memberwise copy would alias).
template <class L, class H>
NestedTrieTopology<L, H>::NestedTrieTopology(const NestedTrieTopology &that) {
  nodes_.reserve(that.nodes_.size());
  for (size_t i = 0; i < that.nodes_.size(); ++i) {
    NextMap *node = that.nodes_[i];
    nodes_.push_back(new NextMap(*node));
  }
}
template <class L, class H>
NestedTrieTopology<L, H>::~NestedTrieTopology() {
  for (size_t i = 0; i < nodes_.size(); ++i) {
    NextMap *node = nodes_[i];
    delete node;
  }
}
// TODO(wuke): std::swap compatibility
template <class L, class H>
inline void NestedTrieTopology<L, H>::swap(NestedTrieTopology &that) {
  nodes_.swap(that.nodes_);
}
// Copy-and-swap assignment: strong exception safety, handles self-assignment.
template <class L, class H>
inline NestedTrieTopology<L, H> &NestedTrieTopology<L, H>::operator=(
    const NestedTrieTopology &that) {
  NestedTrieTopology copy(that);
  swap(copy);
  return *this;
}
// Equality is by content: same node count and identical edge map per node.
template <class L, class H>
inline bool NestedTrieTopology<L, H>::operator==(
    const NestedTrieTopology &that) const {
  if (NumNodes() != that.NumNodes()) return false;
  for (int i = 0; i < NumNodes(); ++i)
    if (ChildrenOf(i) != that.ChildrenOf(i)) return false;
  return true;
}
template <class L, class H>
inline bool NestedTrieTopology<L, H>::operator!=(
    const NestedTrieTopology &that) const {
  return !(*this == that);
}
// Inserts an edge labeled `label` under `parent`, allocating a fresh child
// node when the edge does not already exist; returns the child's id either
// way.
template <class L, class H>
inline int NestedTrieTopology<L, H>::Insert(int parent, const L &label) {
  int ret = Find(parent, label);
  if (ret == kNoTrieNodeId) {
    ret = NumNodes();
    (*nodes_[parent])[label] = ret;
    nodes_.push_back(new NextMap);
  }
  return ret;
}
// Returns the child of `parent` reached via `label`, or kNoTrieNodeId when
// no such edge exists.
template <class L, class H>
inline int NestedTrieTopology<L, H>::Find(int parent, const L &label) const {
  typename NextMap::const_iterator it = nodes_[parent]->find(label);
  return it == nodes_[parent]->end() ? kNoTrieNodeId : it->second;
}
// Deserializes into a temporary and swaps it in only when the whole read
// succeeds, leaving *this untouched on failure. The loop starts at 1 because
// the temporary's constructor already created node 0.
template <class L, class H>
inline std::istream &NestedTrieTopology<L, H>::Read(
    std::istream &strm) {  // NOLINT
  NestedTrieTopology new_trie;
  size_t num_nodes;
  if (!ReadType(strm, &num_nodes)) return strm;
  for (size_t i = 1; i < num_nodes; ++i) new_trie.nodes_.push_back(new NextMap);
  for (size_t i = 0; i < num_nodes; ++i) ReadType(strm, new_trie.nodes_[i]);
  if (strm) swap(new_trie);
  return strm;
}
// Serializes the node count followed by each node's edge map, mirroring
// Read() above.
template <class L, class H>
inline std::ostream &NestedTrieTopology<L, H>::Write(
    std::ostream &strm) const {  // NOLINT
  WriteType(strm, NumNodes());
  for (size_t i = 0; i < NumNodes(); ++i) WriteType(strm, *nodes_[i]);
  return strm;
}
// Advances to the next edge; when the current node's edges are exhausted,
// skips forward over nodes with empty edge maps to the next node that has
// outgoing edges (or to the end position).
template <class L, class H>
inline typename NestedTrieTopology<L, H>::const_iterator
    &NestedTrieTopology<L, H>::const_iterator::operator++() {
  ++cur_edge_;
  if (cur_edge_ == ptr_->nodes_[cur_node_]->end()) {
    ++cur_node_;
    while (cur_node_ < ptr_->NumNodes() && ptr_->nodes_[cur_node_]->empty())
      ++cur_node_;
    SetProperCurEdge();
  }
  return *this;
}
// Post-increment: copies, advances, returns the pre-advance copy.
template <class L, class H>
inline typename NestedTrieTopology<L, H>::const_iterator
    &NestedTrieTopology<L, H>::const_iterator::operator++(int) {  // NOLINT
  const_iterator save(*this);
  ++(*this);
  return save;
}
// The trie topology in a single hash map; only allows iteration over
// all the edges in arbitrary order. Node ids are dense ints with 0 as the
// root; the root itself has no entry, hence NumNodes() = edges + 1.
template <class L, class H>
class FlatTrieTopology {
 private:
  typedef std::unordered_map<ParentLabel<L>, int, ParentLabelHash<L, H>>
      NextMap;
 public:
  // Iterator over edges as std::pair<ParentLabel<L>, int>
  typedef typename NextMap::const_iterator const_iterator;
  typedef L Label;
  typedef H Hash;
  FlatTrieTopology() {}
  FlatTrieTopology(const FlatTrieTopology &that) : next_(that.next_) {}
  // Conversion from any edge range (e.g. another topology's iterators).
  template <class T>
  explicit FlatTrieTopology(const T &that);
  // TODO(wuke): std::swap compatibility
  void swap(FlatTrieTopology &that) { next_.swap(that.next_); }
  bool operator==(const FlatTrieTopology &that) const {
    return next_ == that.next_;
  }
  bool operator!=(const FlatTrieTopology &that) const {
    return !(*this == that);
  }
  int Root() const { return 0; }
  // +1 accounts for the root, which has no incoming edge in the map.
  size_t NumNodes() const { return next_.size() + 1; }
  int Insert(int parent, const L &label);
  int Find(int parent, const L &label) const;
  std::istream &Read(std::istream &strm) {  // NOLINT
    return ReadType(strm, &next_);
  }
  std::ostream &Write(std::ostream &strm) const {  // NOLINT
    return WriteType(strm, next_);
  }
  const_iterator begin() const { return next_.begin(); }
  const_iterator end() const { return next_.end(); }
 private:
  NextMap next_;
};
// Builds the edge map directly from any range of (ParentLabel, child) pairs.
template <class L, class H>
template <class T>
FlatTrieTopology<L, H>::FlatTrieTopology(const T &that)
    : next_(that.begin(), that.end()) {}
// Inserts an edge labeled `label` under `parent`, assigning the next dense
// node id when the edge is new; returns the child's id either way.
template <class L, class H>
inline int FlatTrieTopology<L, H>::Insert(int parent, const L &label) {
  int ret = Find(parent, label);
  if (ret == kNoTrieNodeId) {
    ret = NumNodes();
    next_[ParentLabel<L>(parent, label)] = ret;
  }
  return ret;
}
// Returns the child of `parent` reached via `label`, or kNoTrieNodeId when
// no such edge exists.
template <class L, class H>
inline int FlatTrieTopology<L, H>::Find(int parent, const L &label) const {
  typename NextMap::const_iterator it =
      next_.find(ParentLabel<L>(parent, label));
  return it == next_.end() ? kNoTrieNodeId : it->second;
}
// A collection of implementations of the trie data structure. The key
// is a sequence of type `L` which must be hashable. The value is of
// `V` which must be default constructible and copyable. In addition,
// a value object is stored for each node in the trie therefore
// copying `V` should be cheap.
//
// One can access the store values with an integer node id, using the
// [] operator. A valid node id can be obtained by the following ways:
//
// 1. Using the `Root()` method to get the node id of the root.
//
// 2. Iterating through 0 to `NumNodes() - 1`. The node ids are dense
// so every integer in this range is a valid node id.
//
// 3. Using the node id returned from a successful `Insert()` or
// `Find()` call.
//
// 4. Iterating over the trie edges with an `EdgeIterator` and using
// the node ids returned from its `Parent()` and `Child()` methods.
//
// Below is an example of inserting keys into the trie:
//
// const string words[] = {"hello", "health", "jello"};
// Trie<char, bool> dict;
// for (auto word : words) {
// int cur = dict.Root();
// for (char c : word) {
// cur = dict.Insert(cur, c);
// }
// dict[cur] = true;
// }
//
// And the following is an example of looking up the longest prefix of
// a string using the trie constructed above:
//
// string query = "healed";
// size_t prefix_length = 0;
// int cur = dict.Find(dict.Root(), query[prefix_length]);
// while (prefix_length < query.size() &&
// cur != Trie<char, bool>::kNoNodeId) {
// ++prefix_length;
// cur = dict.Find(cur, query[prefix_length]);
// }
// Trie keyed by sequences of `L` with a value of `V` stored at every node;
// the node layout is delegated to the topology policy `T` (see the usage
// example in the comment block above).
template <class L, class V, class T>
class MutableTrie {
 public:
  template <class LL, class VV, class TT>
  friend class MutableTrie;
  typedef L Label;
  typedef V Value;
  typedef T Topology;
  // Constructs a trie with only the root node.
  MutableTrie() {}
  // Conversion from another trie of a possibly different
  // topology. The underlying topology must support conversion.
  template <class S>
  explicit MutableTrie(const MutableTrie<L, V, S> &that)
      : topology_(that.topology_), values_(that.values_) {}
  // TODO(wuke): std::swap compatibility
  void swap(MutableTrie &that) {
    topology_.swap(that.topology_);
    values_.swap(that.values_);
  }
  int Root() const { return topology_.Root(); }
  size_t NumNodes() const { return topology_.NumNodes(); }
  // Inserts an edge with given `label` at node `parent`. Returns the
  // child node id. If the node already exists, returns the node id
  // right away. New nodes get a default-constructed value (via resize).
  int Insert(int parent, const L &label) {
    int ret = topology_.Insert(parent, label);
    values_.resize(NumNodes());
    return ret;
  }
  // Finds the node id of the node from `parent` via `label`. Returns
  // `kNoTrieNodeId` when such a node does not exist.
  int Find(int parent, const L &label) const {
    return topology_.Find(parent, label);
  }
  const T &TrieTopology() const { return topology_; }
  // Accesses the value stored for the given node (no bounds checking).
  V &operator[](int node_id) { return values_[node_id]; }
  const V &operator[](int node_id) const { return values_[node_id]; }
  // Comparison by content
  bool operator==(const MutableTrie &that) const {
    return topology_ == that.topology_ && values_ == that.values_;
  }
  bool operator!=(const MutableTrie &that) const { return !(*this == that); }
  // Binary (de)serialization: topology first, then the per-node values.
  std::istream &Read(std::istream &strm) {  // NOLINT
    ReadType(strm, &topology_);
    ReadType(strm, &values_);
    return strm;
  }
  std::ostream &Write(std::ostream &strm) const {  // NOLINT
    WriteType(strm, topology_);
    WriteType(strm, values_);
    return strm;
  }
 private:
  T topology_;
  std::vector<V> values_;
};
} // namespace fst
#endif // FST_EXTENSIONS_LINEAR_TRIE_H_
| 0 |
coqui_public_repos/xtts-streaming-server | coqui_public_repos/xtts-streaming-server/server/Dockerfile | FROM pytorch/pytorch:2.1.0-cuda11.8-cudnn8-devel
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install --no-install-recommends -y sox libsox-fmt-all curl wget gcc git git-lfs build-essential libaio-dev libsndfile1 ssh ffmpeg && \
apt-get clean && apt-get -y autoremove
WORKDIR /app
COPY requirements.txt .
RUN python -m pip install --use-deprecated=legacy-resolver -r requirements.txt \
&& python -m pip cache purge
RUN python -m unidic download
RUN mkdir -p /app/tts_models
COPY main.py .
ENV NVIDIA_DISABLE_REQUIRE=1
ENV NUM_THREADS=2
EXPOSE 80
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]
| 0 |
coqui_public_repos/TTS/TTS/tts/layers | coqui_public_repos/TTS/TTS/tts/layers/delightful_tts/kernel_predictor.py | import torch.nn as nn # pylint: disable=consider-using-from-import
from torch.nn.utils import parametrize
class KernelPredictor(nn.Module):
    """Kernel predictor for the location-variable convolutions

    Predicts, from a conditioning sequence, the per-position convolution
    kernels and biases consumed by a location-variable convolution layer.

    Args:
        cond_channels (int): number of channel for the conditioning sequence,
        conv_in_channels (int): number of channel for the input sequence,
        conv_out_channels (int): number of channel for the output sequence,
        conv_layers (int): number of layers
    """

    def __init__(  # pylint: disable=dangerous-default-value
        self,
        cond_channels,
        conv_in_channels,
        conv_out_channels,
        conv_layers,
        conv_kernel_size=3,
        kpnet_hidden_channels=64,
        kpnet_conv_size=3,
        kpnet_dropout=0.0,
        kpnet_nonlinear_activation="LeakyReLU",
        kpnet_nonlinear_activation_params={"negative_slope": 0.1},
    ):
        super().__init__()

        self.conv_in_channels = conv_in_channels
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_layers = conv_layers

        # Flattened sizes of everything that must be predicted for the
        # downstream convolution: all kernel weights (l_w) and biases (l_b).
        kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers  # l_w
        kpnet_bias_channels = conv_out_channels * conv_layers  # l_b

        # Entry convolution lifting the conditioning sequence to the hidden width.
        self.input_conv = nn.Sequential(
            nn.utils.parametrizations.weight_norm(
                nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)
            ),
            getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
        )

        # Three residual stacks: dropout followed by two conv+activation pairs.
        self.residual_convs = nn.ModuleList()
        padding = (kpnet_conv_size - 1) // 2  # "same" padding for odd kernel sizes
        for _ in range(3):
            self.residual_convs.append(
                nn.Sequential(
                    nn.Dropout(kpnet_dropout),
                    nn.utils.parametrizations.weight_norm(
                        nn.Conv1d(
                            kpnet_hidden_channels,
                            kpnet_hidden_channels,
                            kpnet_conv_size,
                            padding=padding,
                            bias=True,
                        )
                    ),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                    nn.utils.parametrizations.weight_norm(
                        nn.Conv1d(
                            kpnet_hidden_channels,
                            kpnet_hidden_channels,
                            kpnet_conv_size,
                            padding=padding,
                            bias=True,
                        )
                    ),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                )
            )
        # Output heads mapping hidden features to the flattened kernels / biases.
        self.kernel_conv = nn.utils.parametrizations.weight_norm(
            nn.Conv1d(
                kpnet_hidden_channels,
                kpnet_kernel_channels,
                kpnet_conv_size,
                padding=padding,
                bias=True,
            )
        )
        self.bias_conv = nn.utils.parametrizations.weight_norm(
            nn.Conv1d(
                kpnet_hidden_channels,
                kpnet_bias_channels,
                kpnet_conv_size,
                padding=padding,
                bias=True,
            )
        )

    def forward(self, c):
        """
        Args:
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)

        Returns:
            Tuple[Tensor, Tensor]: kernels of shape
            (batch, conv_layers, conv_in_channels, conv_out_channels, conv_kernel_size, cond_length)
            and biases of shape (batch, conv_layers, conv_out_channels, cond_length).
        """
        batch, _, cond_length = c.shape
        c = self.input_conv(c)
        for residual_conv in self.residual_convs:
            # NOTE(review): moving submodules to a device inside forward() is
            # unusual -- presumably a safeguard for partially-moved modules;
            # confirm it is still needed.
            residual_conv.to(c.device)
            c = c + residual_conv(c)
        k = self.kernel_conv(c)
        b = self.bias_conv(c)
        # Unflatten the channel axis into per-layer kernel/bias dimensions.
        kernels = k.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_in_channels,
            self.conv_out_channels,
            self.conv_kernel_size,
            cond_length,
        )
        bias = b.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_out_channels,
            cond_length,
        )

        return kernels, bias

    def remove_weight_norm(self):
        # Strip the weight-norm parametrizations (typically before inference/export).
        parametrize.remove_parametrizations(self.input_conv[0], "weight")
        parametrize.remove_parametrizations(self.kernel_conv, "weight")
        parametrize.remove_parametrizations(self.bias_conv, "weight")
        for block in self.residual_convs:
            # Indices 1 and 3 are the two weight-normed convs in each stack.
            parametrize.remove_parametrizations(block[1], "weight")
            parametrize.remove_parametrizations(block[3], "weight")
| 0 |
coqui_public_repos/STT-models/portuguese/itml | coqui_public_repos/STT-models/portuguese/itml/v0.1.1/alphabet.txt |
'
-
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
à
á
â
ã
ç
é
ê
í
ó
ô
õ
ú
ü
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/mpdt/mpdtexpand.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Expands a (bounded-stack) MPDT as an FST.
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/extensions/mpdt/mpdtscript.h>
#include <fst/extensions/mpdt/read_write_utils.h>
#include <fst/util.h>
DEFINE_string(mpdt_parentheses, "",
"MPDT parenthesis label pairs with assignments");
DEFINE_bool(connect, true, "Trim output?");
DEFINE_bool(keep_parentheses, false, "Keep PDT parentheses in result?");
int main(int argc, char **argv) {
namespace s = fst::script;
using fst::script::FstClass;
using fst::script::VectorFstClass;
using fst::ReadLabelTriples;
using fst::MPdtExpandOptions;
string usage = "Expand a (bounded-stack) MPDT as an FST.\n\n Usage: ";
usage += argv[0];
usage += " in.pdt [out.fst]\n";
std::set_new_handler(FailedNewHandler);
SET_FLAGS(usage.c_str(), &argc, &argv, true);
if (argc > 3) {
ShowUsage();
return 1;
}
const string in_name =
(argc > 1 && (strcmp(argv[1], "-") != 0)) ? argv[1] : "";
const string out_name = argc > 2 ? argv[2] : "";
std::unique_ptr<FstClass> ifst(FstClass::Read(in_name));
if (!ifst) return 1;
if (FLAGS_mpdt_parentheses.empty()) {
LOG(ERROR) << argv[0] << ": No MPDT parenthesis label pairs provided";
return 1;
}
std::vector<s::LabelPair> parens;
std::vector<int64> assignments;
if (!ReadLabelTriples(FLAGS_mpdt_parentheses, &parens, &assignments, false))
return 1;
VectorFstClass ofst(ifst->ArcType());
const MPdtExpandOptions opts(FLAGS_connect, FLAGS_keep_parentheses);
s::MPdtExpand(*ifst, parens, assignments, &ofst, opts);
ofst.Write(out_name);
return 0;
}
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/expanded-fst.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Generic FST augmented with state count-interface class definition.
#ifndef FST_EXPANDED_FST_H_
#define FST_EXPANDED_FST_H_
#include <sys/types.h>
#include <istream>
#include <string>
#include <fst/log.h>
#include <fstream>
#include <fst/fst.h>
namespace fst {
// A generic FST plus state count.
// An FST interface that additionally exposes its (known, finite) state count.
template <class A>
class ExpandedFst : public Fst<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;

  virtual StateId NumStates() const = 0;  // State count

  // Get a copy of this ExpandedFst. See Fst<>::Copy() for further doc.
  ExpandedFst<Arc> *Copy(bool safe = false) const override = 0;

  // Read an ExpandedFst from an input stream; return NULL on error.
  static ExpandedFst<Arc> *Read(std::istream &strm,
                                const FstReadOptions &opts) {
    FstReadOptions ropts(opts);
    FstHeader hdr;
    if (ropts.header) {
      hdr = *opts.header;
    } else {
      // No pre-parsed header was supplied; read it from the stream.
      if (!hdr.Read(strm, opts.source)) return nullptr;
      ropts.header = &hdr;
    }
    // Only FST types carrying the kExpanded property may be read through
    // this interface.
    if (!(hdr.Properties() & kExpanded)) {
      LOG(ERROR) << "ExpandedFst::Read: Not an ExpandedFst: " << ropts.source;
      return nullptr;
    }
    // Dispatch to the reader registered for the concrete FST type.
    const auto reader =
        FstRegister<Arc>::GetRegister()->GetReader(hdr.FstType());
    if (!reader) {
      LOG(ERROR) << "ExpandedFst::Read: Unknown FST type \"" << hdr.FstType()
                 << "\" (arc type = \"" << A::Type() << "\"): " << ropts.source;
      return nullptr;
    }
    auto *fst = reader(strm, ropts);
    if (!fst) return nullptr;
    // Safe: the kExpanded property was verified above.
    return static_cast<ExpandedFst<Arc> *>(fst);
  }

  // Read an ExpandedFst from a file; return NULL on error.
  // Empty filename reads from standard input.
  static ExpandedFst<Arc> *Read(const string &filename) {
    if (!filename.empty()) {
      std::ifstream strm(filename,
                         std::ios_base::in | std::ios_base::binary);
      if (!strm) {
        LOG(ERROR) << "ExpandedFst::Read: Can't open file: " << filename;
        return nullptr;
      }
      return Read(strm, FstReadOptions(filename));
    } else {
      return Read(std::cin, FstReadOptions("standard input"));
    }
  }
};
namespace internal {

// ExpandedFst<A> case - abstract methods.
// These shims let generic code query per-state information uniformly; for an
// ExpandedFst they simply forward to the corresponding virtual method.
template <class Arc>
inline typename Arc::Weight Final(const ExpandedFst<Arc> &fst,
                                  typename Arc::StateId s) {
  return fst.Final(s);
}

template <class Arc>
inline ssize_t NumArcs(const ExpandedFst<Arc> &fst, typename Arc::StateId s) {
  return fst.NumArcs(s);
}

template <class Arc>
inline ssize_t NumInputEpsilons(const ExpandedFst<Arc> &fst,
                                typename Arc::StateId s) {
  return fst.NumInputEpsilons(s);
}

template <class Arc>
inline ssize_t NumOutputEpsilons(const ExpandedFst<Arc> &fst,
                                 typename Arc::StateId s) {
  return fst.NumOutputEpsilons(s);
}

}  // namespace internal
// A useful alias when using StdArc.
using StdExpandedFst = ExpandedFst<StdArc>;
// This is a helper class template useful for attaching an ExpandedFst
// interface to its implementation, handling reference counting. It
// delegates to ImplToFst the handling of the Fst interface methods.
template <class Impl, class FST = ExpandedFst<typename Impl::Arc>>
class ImplToExpandedFst : public ImplToFst<Impl, FST> {
 public:
  using Arc = typename FST::Arc;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  using ImplToFst<Impl, FST>::operator=;

  // Forwards the state count to the shared implementation object.
  StateId NumStates() const override { return GetImpl()->NumStates(); }

 protected:
  using ImplToFst<Impl, FST>::GetImpl;

  explicit ImplToExpandedFst(std::shared_ptr<Impl> impl)
      : ImplToFst<Impl, FST>(impl) {}

  ImplToExpandedFst(const ImplToExpandedFst<Impl, FST> &fst)
      : ImplToFst<Impl, FST>(fst) {}

  // `safe` has the semantics of Fst<>::Copy()'s safe argument.
  ImplToExpandedFst(const ImplToExpandedFst<Impl, FST> &fst, bool safe)
      : ImplToFst<Impl, FST>(fst, safe) {}

  // Deserializes an implementation object from an open stream.
  static Impl *Read(std::istream &strm, const FstReadOptions &opts) {
    return Impl::Read(strm, opts);
  }

  // Read FST implementation from a file; return NULL on error.
  // Empty filename reads from standard input.
  static Impl *Read(const string &filename) {
    if (!filename.empty()) {
      std::ifstream strm(filename,
                         std::ios_base::in | std::ios_base::binary);
      if (!strm) {
        LOG(ERROR) << "ExpandedFst::Read: Can't open file: " << filename;
        return nullptr;
      }
      return Impl::Read(strm, FstReadOptions(filename));
    } else {
      return Impl::Read(std::cin, FstReadOptions("standard input"));
    }
  }
};
// Returns the number of states in an FST, iterating over all states to
// count them only when the FST does not already know its size.
template <class Arc>
typename Arc::StateId CountStates(const Fst<Arc> &fst) {
  // Fast path: an expanded FST carries its state count.
  if (fst.Properties(kExpanded, false)) {
    return static_cast<const ExpandedFst<Arc> *>(&fst)->NumStates();
  }
  // Slow path: walk every state and tally.
  typename Arc::StateId num_states = 0;
  for (StateIterator<Fst<Arc>> siter(fst); !siter.Done(); siter.Next()) {
    ++num_states;
  }
  return num_states;
}
// Returns the total number of arcs in an FST by summing the per-state counts.
template <class Arc>
typename Arc::StateId CountArcs(const Fst<Arc> &fst) {
  size_t total_arcs = 0;
  for (StateIterator<Fst<Arc>> siter(fst); !siter.Done(); siter.Next()) {
    total_arcs += fst.NumArcs(siter.Value());
  }
  return total_arcs;
}
} // namespace fst
#endif // FST_EXPANDED_FST_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/test/Makefile.am | AM_CPPFLAGS = -I$(srcdir)/../include $(ICU_CPPFLAGS)
# Link every test against the core FST library plus libm and the
# dynamic-loading libraries.
LDADD = ../lib/libfst.la -lm $(DL_LIBS)

check_PROGRAMS = fst_test weight_test

fst_test_SOURCES = fst_test.cc

weight_test_SOURCES = weight_test.cc

# algo_test is built once per weight semiring: every variant reuses the same
# sources and selects the semiring through a -DTEST_* preprocessor define.
algo_test_SOURCES = algo_test.cc

check_PROGRAMS += algo_test_log
algo_test_log_SOURCES = $(algo_test_SOURCES)
algo_test_log_CPPFLAGS = -DTEST_LOG $(AM_CPPFLAGS)

check_PROGRAMS += algo_test_tropical
algo_test_tropical_SOURCES = $(algo_test_SOURCES)
algo_test_tropical_CPPFLAGS = -DTEST_TROPICAL $(AM_CPPFLAGS)

check_PROGRAMS += algo_test_minmax
algo_test_minmax_SOURCES = $(algo_test_SOURCES)
algo_test_minmax_CPPFLAGS = -DTEST_MINMAX $(AM_CPPFLAGS)

check_PROGRAMS += algo_test_lexicographic
algo_test_lexicographic_SOURCES = $(algo_test_SOURCES)
algo_test_lexicographic_CPPFLAGS = -DTEST_LEXICOGRAPHIC $(AM_CPPFLAGS)

check_PROGRAMS += algo_test_power
algo_test_power_SOURCES = $(algo_test_SOURCES)
algo_test_power_CPPFLAGS = -DTEST_POWER $(AM_CPPFLAGS)

# Run every program built above under "make check".
TESTS = $(check_PROGRAMS)
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/doc/Error-Codes.rst | .. _error-codes:
Error codes
===========
Below are the definitions of all error codes used in the API, with their numerical values and a human-readable description.
.. literalinclude:: ../native_client/coqui-stt.h
:language: c
:start-after: sphinx-doc: error_code_listing_start
:end-before: sphinx-doc: error_code_listing_end
| 0 |
coqui_public_repos/TTS/docs | coqui_public_repos/TTS/docs/source/make.bat | @ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7 | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/m4/ltversion.m4 | # ltversion.m4 -- version numbers -*- Autoconf -*-
#
# Copyright (C) 2004 Free Software Foundation, Inc.
# Written by Scott James Remnant, 2004
#
# This file is free software; the Free Software Foundation gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
# @configure_input@
# serial 3337 ltversion.m4
# This file is part of GNU Libtool
m4_define([LT_PACKAGE_VERSION], [2.4.2])
m4_define([LT_PACKAGE_REVISION], [1.3337])
AC_DEFUN([LTVERSION_VERSION],
[macro_version='2.4.2'
macro_revision='1.3337'
_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
_LT_DECL(, macro_revision, 0)
])
| 0 |
coqui_public_repos/TTS/TTS/tts | coqui_public_repos/TTS/TTS/tts/utils/measures.py | def alignment_diagonal_score(alignments, binary=False):
"""
Compute how diagonal alignment predictions are. It is useful
to measure the alignment consistency of a model
Args:
alignments (torch.Tensor): batch of alignments.
binary (bool): if True, ignore scores and consider attention
as a binary mask.
Shape:
- alignments : :math:`[B, T_de, T_en]`
"""
maxs = alignments.max(dim=1)[0]
if binary:
maxs[maxs > 0] = 1
return maxs.mean(dim=1).mean(dim=0).item()
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/win-package.sh | #!/bin/bash
# Fail fast and echo each command for easier CI debugging.
set -xe

# Provides the package_* helpers used below (presumably, since they are not
# defined in this script).
source $(dirname "$0")/tc-tests-utils.sh

mkdir -p ${TASKCLUSTER_ARTIFACTS} || true

# Preserve the bazel build logs alongside the artifacts.
cp ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel*.log ${TASKCLUSTER_ARTIFACTS}/

package_native_client "native_client.tar.xz"

package_libdeepspeech_as_zip "libdeepspeech.zip"

# .NET artifacts: NuGet packages plus the console test client.
cp ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/dotnet/*.nupkg ${TASKCLUSTER_ARTIFACTS}/
cp ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/dotnet/DeepSpeechConsole/bin/x64/Release/DeepSpeechConsole.exe ${TASKCLUSTER_ARTIFACTS}/

# Python wheels are only present on builds that produced them.
if [ -d ${DS_ROOT_TASK}/DeepSpeech/ds/wheels ]; then
    cp ${DS_ROOT_TASK}/DeepSpeech/ds/wheels/* ${TASKCLUSTER_ARTIFACTS}/
fi;

# Same for the Node.js wrapper and npm package.
if [ -f ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/javascript/wrapper.tar.gz ]; then
    cp ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/javascript/wrapper.tar.gz ${TASKCLUSTER_ARTIFACTS}/
    cp ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/javascript/deepspeech-*.tgz ${TASKCLUSTER_ARTIFACTS}/
fi;
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/mpdt/compose.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Compose an MPDT and an FST.
#ifndef FST_EXTENSIONS_MPDT_COMPOSE_H_
#define FST_EXTENSIONS_MPDT_COMPOSE_H_
#include <list>
#include <fst/extensions/mpdt/mpdt.h>
#include <fst/extensions/pdt/compose.h>
#include <fst/compose.h>
namespace fst {
// Composition filter that additionally tracks MPDT parenthesis stacks. It
// wraps an ordinary composition filter (`Filter`) and extends its filter
// state with a stack id, blocking composition paths whose parentheses do
// not balance (FilterParen) and final states with a non-empty stack
// (FilterFinal).
template <class Filter>
class MPdtParenFilter {
 public:
  using FST1 = typename Filter::FST1;
  using FST2 = typename Filter::FST2;
  using Arc = typename Filter::Arc;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using Matcher1 = typename Filter::Matcher1;
  using Matcher2 = typename Filter::Matcher2;

  using StackId = StateId;
  using ParenStack = internal::MPdtStack<StackId, Label>;
  using FilterState1 = typename Filter::FilterState;
  using FilterState2 = IntegerFilterState<StackId>;
  // Combined state: (inner filter state, current stack configuration id).
  using FilterState = PairFilterState<FilterState1, FilterState2>;

  MPdtParenFilter(const FST1 &fst1, const FST2 &fst2,
                  Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr,
                  const std::vector<std::pair<Label, Label>> *parens = nullptr,
                  const std::vector<Label> *assignments = nullptr,
                  bool expand = false, bool keep_parens = true)
      : filter_(fst1, fst2, matcher1, matcher2),
        parens_(parens ? *parens : std::vector<std::pair<Label, Label>>()),
        assignments_(assignments ? *assignments : std::vector<Label>()),
        expand_(expand),
        keep_parens_(keep_parens),
        fs_(FilterState::NoState()),
        stack_(parens_, assignments_),
        paren_id_(-1) {
    // NOTE(review): `parens_` is already initialized from *parens in the
    // member-init list, and the loop below appends the same pairs again, so
    // each pair ends up stored twice. Stack-produced indices stay within the
    // first copy, so this looks harmless but wasteful -- confirm upstream.
    if (parens) {
      for (const auto &pair : *parens) {
        parens_.push_back(pair);
        // Register open parens with both matchers; close parens are only
        // registered up front when not expanding (otherwise SetState()
        // manages them per top-of-stack).
        GetMatcher1()->AddOpenParen(pair.first);
        GetMatcher2()->AddOpenParen(pair.first);
        if (!expand_) {
          GetMatcher1()->AddCloseParen(pair.second);
          GetMatcher2()->AddCloseParen(pair.second);
        }
      }
    }
  }

  // NOTE(review): `assignments_` is not copied here; it is only consumed by
  // the stack's constructor (which reads filter.assignments_ directly), so
  // the member stays empty in the copy -- confirm this is intentional.
  MPdtParenFilter(const MPdtParenFilter &filter, bool safe = false)
      : filter_(filter.filter_, safe),
        parens_(filter.parens_),
        expand_(filter.expand_),
        keep_parens_(filter.keep_parens_),
        fs_(FilterState::NoState()),
        stack_(filter.parens_, filter.assignments_),
        paren_id_(-1) {}

  // Initial state: inner filter start paired with the empty stack (id 0).
  FilterState Start() const {
    return FilterState(filter_.Start(), FilterState2(0));
  }

  // Updates the filter for the state pair (s1, s2); when expanding, keeps
  // the matchers' close-paren sets in sync with the paren on top of the
  // current stack.
  void SetState(StateId s1, StateId s2, const FilterState &fs) {
    fs_ = fs;
    filter_.SetState(s1, s2, fs_.GetState1());
    if (!expand_) return;
    const auto paren_id = stack_.Top(fs.GetState2().GetState());
    if (paren_id != paren_id_) {
      if (paren_id_ != -1) {
        GetMatcher1()->RemoveCloseParen(parens_[paren_id_].second);
        GetMatcher2()->RemoveCloseParen(parens_[paren_id_].second);
      }
      paren_id_ = paren_id;
      if (paren_id_ != -1) {
        GetMatcher1()->AddCloseParen(parens_[paren_id_].second);
        GetMatcher2()->AddCloseParen(parens_[paren_id_].second);
      }
    }
  }

  // Filters an arc pair: defers to the inner filter, then handles paren
  // labels on either side, pushing/popping the stack via FilterParen().
  FilterState FilterArc(Arc *arc1, Arc *arc2) const {
    const auto fs1 = filter_.FilterArc(arc1, arc2);
    const auto &fs2 = fs_.GetState2();
    if (fs1 == FilterState1::NoState()) return FilterState::NoState();
    if (arc1->olabel == kNoLabel && arc2->ilabel) {  // arc2 parentheses.
      if (keep_parens_) {
        arc1->ilabel = arc2->ilabel;
      } else if (arc2->ilabel) {
        arc2->olabel = arc1->ilabel;
      }
      return FilterParen(arc2->ilabel, fs1, fs2);
    } else if (arc2->ilabel == kNoLabel && arc1->olabel) {  // arc1 parentheses
      if (keep_parens_) {
        arc2->olabel = arc1->olabel;
      } else {
        arc1->ilabel = arc2->olabel;
      }
      return FilterParen(arc1->olabel, fs1, fs2);
    } else {
      return FilterState(fs1, fs2);
    }
  }

  // Blocks final weights whenever the parenthesis stack is non-empty.
  void FilterFinal(Weight *w1, Weight *w2) const {
    if (fs_.GetState2().GetState() != 0) *w1 = Weight::Zero();
    filter_.FilterFinal(w1, w2);
  }

  // Returns respective matchers; ownership stays with filter.
  Matcher1 *GetMatcher1() { return filter_.GetMatcher1(); }

  Matcher2 *GetMatcher2() { return filter_.GetMatcher2(); }

  // Paren relabeling invalidates label-dependent properties, so only
  // label-invariant ones are propagated.
  uint64 Properties(uint64 iprops) const {
    const auto oprops = filter_.Properties(iprops);
    return oprops & kILabelInvariantProperties & kOLabelInvariantProperties;
  }

 private:
  // Applies paren `label` to the stack at state fs2; a negative stack id
  // means the paren operation is invalid there and the path is blocked.
  const FilterState FilterParen(Label label, const FilterState1 &fs1,
                                const FilterState2 &fs2) const {
    if (!expand_) return FilterState(fs1, fs2);
    const auto stack_id = stack_.Find(fs2.GetState(), label);
    if (stack_id < 0) {
      return FilterState::NoState();
    } else {
      return FilterState(fs1, FilterState2(stack_id));
    }
  }

  Filter filter_;                                // Wrapped composition filter.
  std::vector<std::pair<Label, Label>> parens_;  // Open/close paren pairs.
  std::vector<Label> assignments_;               // Paren-to-stack assignments.
  bool expand_;                                  // Expands to FST?
  bool keep_parens_;                             // Retains parentheses in output?
  FilterState fs_;                               // Current filter state.
  mutable ParenStack stack_;
  ssize_t paren_id_;                             // Paren currently on top of stack (-1: none).
};
// Class to setup composition options for MPDT composition. Default is to take
// the MPDT as the first composition argument.
template <class Arc, bool left_pdt = true>
class MPdtComposeFstOptions
    : public ComposeFstOptions<Arc, ParenMatcher<Fst<Arc>>,
                               MPdtParenFilter<AltSequenceComposeFilter<
                                   ParenMatcher<Fst<Arc>> >> > {
 public:
  using Label = typename Arc::Label;
  using MPdtMatcher = ParenMatcher<Fst<Arc>>;
  using MPdtFilter = MPdtParenFilter<AltSequenceComposeFilter<MPdtMatcher>>;

  using ComposeFstOptions<Arc, MPdtMatcher, MPdtFilter>::matcher1;
  using ComposeFstOptions<Arc, MPdtMatcher, MPdtFilter>::matcher2;
  using ComposeFstOptions<Arc, MPdtMatcher, MPdtFilter>::filter;

  // `ifst1` is the MPDT. NOTE(review): the raw news below follow the usual
  // ComposeFstOptions ownership convention -- confirm the consumer
  // (ComposeFst) takes over deletion.
  MPdtComposeFstOptions(const Fst<Arc> &ifst1,
                        const std::vector<std::pair<Label, Label>> &parens,
                        const std::vector<typename Arc::Label> &assignments,
                        const Fst<Arc> &ifst2, bool expand = false,
                        bool keep_parens = true) {
    matcher1 = new MPdtMatcher(ifst1, MATCH_OUTPUT, kParenList);
    matcher2 = new MPdtMatcher(ifst2, MATCH_INPUT, kParenLoop);
    filter = new MPdtFilter(ifst1, ifst2, matcher1, matcher2, &parens,
                            &assignments, expand, keep_parens);
  }
};
// Class to setup composition options for PDT with FST composition.
// Specialization is for the FST as the first composition argument.
template <class Arc>
class MPdtComposeFstOptions<Arc, false>
    : public ComposeFstOptions<
          Arc, ParenMatcher<Fst<Arc>>,
          MPdtParenFilter<SequenceComposeFilter<ParenMatcher<Fst<Arc>> >> > {
 public:
  using Label = typename Arc::Label;
  using MPdtMatcher = ParenMatcher<Fst<Arc>>;
  using MPdtFilter = MPdtParenFilter<SequenceComposeFilter<MPdtMatcher>>;

  using ComposeFstOptions<Arc, MPdtMatcher, MPdtFilter>::matcher1;
  using ComposeFstOptions<Arc, MPdtMatcher, MPdtFilter>::matcher2;
  using ComposeFstOptions<Arc, MPdtMatcher, MPdtFilter>::filter;

  // `ifst2` is the MPDT here, so the matcher roles (kParenLoop/kParenList)
  // are swapped relative to the primary template.
  MPdtComposeFstOptions(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2,
                        const std::vector<std::pair<Label, Label>> &parens,
                        const std::vector<typename Arc::Label> &assignments,
                        bool expand = false, bool keep_parens = true) {
    matcher1 = new MPdtMatcher(ifst1, MATCH_OUTPUT, kParenLoop);
    matcher2 = new MPdtMatcher(ifst2, MATCH_INPUT, kParenList);
    filter = new MPdtFilter(ifst1, ifst2, matcher1, matcher2, &parens,
                            &assignments, expand, keep_parens);
  }
};
// Optional behaviors for the MPDT Compose() driver functions below.
struct MPdtComposeOptions {
  bool connect;                  // Connect output?
  PdtComposeFilter filter_type;  // Which pre-defined filter to use.

  explicit MPdtComposeOptions(bool connect = true,
                              PdtComposeFilter filter_type = PAREN_FILTER)
      : connect(connect), filter_type(filter_type) {}
};
// Composes an MPDT encoded as an FST (1st arg) with an FST (2nd arg); the
// result (3rd arg) is again an MPDT encoded as an FST. In an MPDT some
// transitions carry open/close parenthesis labels, each associated with a
// stack; to be interpretable as an MPDT, the parentheses on every stack must
// balance along a path (see MPdtExpand()). The open/close label pairs are
// given by `parens` and the stack assignments by `assignments`.
template <class Arc>
void Compose(
    const Fst<Arc> &ifst1,
    const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
        &parens,
    const std::vector<typename Arc::Label> &assignments, const Fst<Arc> &ifst2,
    MutableFst<Arc> *ofst,
    const MPdtComposeOptions &opts = MPdtComposeOptions()) {
  // Translate the high-level filter choice into the two filter knobs.
  const bool expand = opts.filter_type != PAREN_FILTER;
  const bool keep_parens = opts.filter_type != EXPAND_FILTER;
  MPdtComposeFstOptions<Arc, true> copts(ifst1, parens, assignments, ifst2,
                                         expand, keep_parens);
  copts.gc_limit = 0;
  *ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
  if (opts.connect) Connect(ofst);
}
// Composes an FST (1st arg) with an MPDT encoded as an FST (2nd arg); the
// result (3rd arg) is again an MPDT encoded as an FST. In an MPDT some
// transitions carry open/close parenthesis labels, each associated with a
// stack; to be interpretable as an MPDT, the parentheses on every stack must
// balance along a path (see MPdtExpand()). The open/close label pairs are
// given by `parens` and the stack assignments by `assignments`.
template <class Arc>
void Compose(
    const Fst<Arc> &ifst1, const Fst<Arc> &ifst2,
    const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
        &parens,
    const std::vector<typename Arc::Label> &assignments, MutableFst<Arc> *ofst,
    const MPdtComposeOptions &opts = MPdtComposeOptions()) {
  // Translate the high-level filter choice into the two filter knobs.
  const bool expand = opts.filter_type != PAREN_FILTER;
  const bool keep_parens = opts.filter_type != EXPAND_FILTER;
  MPdtComposeFstOptions<Arc, false> copts(ifst1, ifst2, parens, assignments,
                                          expand, keep_parens);
  copts.gc_limit = 0;
  *ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
  if (opts.connect) Connect(ofst);
}
} // namespace fst
#endif // FST_EXTENSIONS_MPDT_COMPOSE_H_
| 0 |
coqui_public_repos/TTS/TTS/tts/layers/bark | coqui_public_repos/TTS/TTS/tts/layers/bark/hubert/kmeans_hubert.py | """
Modified HuBERT model without kmeans.
Original author: https://github.com/lucidrains/
Modified by: https://www.github.com/gitmylo/
License: MIT
"""
# Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py
import logging
from pathlib import Path
import torch
from einops import pack, unpack
from torch import nn
from torchaudio.functional import resample
from transformers import HubertModel
def round_down_nearest_multiple(num, divisor):
    """Return the largest multiple of ``divisor`` that is <= ``num``."""
    return num - (num % divisor)
def curtail_to_multiple(t, mult, from_left=False):
    """Trim the last dimension of ``t`` down to a multiple of ``mult``.

    Excess samples are dropped from the right end by default, or from the
    left end when ``from_left`` is set.
    """
    keep = round_down_nearest_multiple(t.shape[-1], mult)
    if from_left:
        window = slice(-keep, None)
    else:
        window = slice(None, keep)
    return t[..., window]
def exists(val):
    """Return True when ``val`` is not None."""
    return val is not None


def default(val, d):
    """Return ``val`` when it is set, otherwise the fallback ``d``."""
    if exists(val):
        return val
    return d
class CustomHubert(nn.Module):
    """
    checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
    or you can train your own

    Wraps a pretrained HuBERT encoder and exposes the hidden states of one
    intermediate layer as semantic features.

    NOTE(review): ``checkpoint_path`` is accepted but never used -- the model
    is always loaded from the "facebook/hubert-base-ls960" hub checkpoint.
    Confirm whether local-checkpoint loading was intentionally dropped.
    """

    def __init__(self, checkpoint_path, target_sample_hz=16000, seq_len_multiple_of=None, output_layer=9, device=None):
        super().__init__()
        self.target_sample_hz = target_sample_hz  # sample rate HuBERT expects
        self.seq_len_multiple_of = seq_len_multiple_of  # optional length rounding
        self.output_layer = output_layer  # index into hidden_states to expose

        if device is not None:
            self.to(device)

        self.model = HubertModel.from_pretrained("facebook/hubert-base-ls960")

        if device is not None:
            self.model.to(device)

        self.model.eval()  # inference only; forward() runs under no_grad

    @property
    def groups(self):
        # Single feature group.
        return 1

    @torch.no_grad()
    def forward(self, wav_input, flatten=True, input_sample_hz=None):
        # Extract layer `self.output_layer` hidden states for a batch of audio.
        # `wav_input` is resampled to `target_sample_hz` when `input_sample_hz`
        # is given; with `flatten` the batch axis is packed away ("* d").
        device = wav_input.device

        if exists(input_sample_hz):
            wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)

        # Optionally trim so the sequence length divides seq_len_multiple_of.
        if exists(self.seq_len_multiple_of):
            wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)

        outputs = self.model.forward(
            wav_input,
            output_hidden_states=True,
        )

        embed = outputs["hidden_states"][self.output_layer]

        embed, packed_shape = pack([embed], "* d")

        # NOTE(review): the numpy round-trip forces a device->CPU->device copy;
        # presumably a workaround for an older einops/torch interaction --
        # confirm it is still required.
        codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device)

        if flatten:
            return codebook_indices

        (codebook_indices,) = unpack(codebook_indices, packed_shape, "*")

        return codebook_indices
| 0 |
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external | coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidxml/rapidxml.hpp | #ifndef CEREAL_RAPIDXML_HPP_INCLUDED
#define CEREAL_RAPIDXML_HPP_INCLUDED
// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
// If standard library is disabled, user must provide implementations of required functions and typedefs
#if !defined(CEREAL_RAPIDXML_NO_STDLIB)
#include <cstdlib> // For std::size_t
#include <cassert> // For assert
#include <new> // For placement new
#endif
// On MSVC, disable "conditional expression is constant" warning (level 4).
// This warning is almost impossible to avoid with certain types of templated code
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4127) // Conditional expression is constant
#pragma warning(disable:4100) // unreferenced formal parameter
#endif
///////////////////////////////////////////////////////////////////////////
// CEREAL_RAPIDXML_PARSE_ERROR
#if defined(CEREAL_RAPIDXML_NO_EXCEPTIONS)
#define CEREAL_RAPIDXML_PARSE_ERROR(what, where) { parse_error_handler(what, where); assert(0); }
namespace cereal {
namespace rapidxml
{
//! When exceptions are disabled by defining CEREAL_RAPIDXML_NO_EXCEPTIONS,
//! this function is called to notify user about the error.
//! It must be defined by the user.
//! <br><br>
//! This function cannot return. If it does, the results are undefined.
//! <br><br>
//! A very simple definition might look like that:
//! <pre>
//! void %rapidxml::%parse_error_handler(const char *what, void *where)
//! {
//! std::cout << "Parse error: " << what << "\n";
//! std::abort();
//! }
//! </pre>
//! \param what Human readable description of the error.
//! \param where Pointer to character data where error was detected.
void parse_error_handler(const char *what, void *where);
}
} // end namespace cereal
#else
#include <exception> // For std::exception
#define CEREAL_RAPIDXML_PARSE_ERROR(what, where) throw parse_error(what, where)
namespace cereal {
namespace rapidxml
{
//! Parse error exception.
//! This exception is thrown by the parser when an error occurs.
//! Use what() function to get human-readable error message.
//! Use where() function to get a pointer to position within source text where error was detected.
//! <br><br>
//! If throwing exceptions by the parser is undesirable,
//! it can be disabled by defining CEREAL_RAPIDXML_NO_EXCEPTIONS macro before rapidxml.hpp is included.
//! This will cause the parser to call rapidxml::parse_error_handler() function instead of throwing an exception.
//! This function must be defined by the user.
//! <br><br>
//! This class derives from <code>std::exception</code> class.
class parse_error: public std::exception
{
public:
//! Constructs parse error
parse_error(const char *what_, void *where_)
: m_what(what_)
, m_where(where_)
{
}
//! Gets human readable description of error.
//! \return Pointer to null terminated description of the error.
virtual const char *what() const CEREAL_NOEXCEPT override
{
return m_what;
}
//! Gets pointer to character data where error happened.
//! Ch should be the same as char type of xml_document that produced the error.
//! \return Pointer to location within the parsed string where error occured.
template<class Ch>
Ch *where() const
{
return reinterpret_cast<Ch *>(m_where);
}
private:
const char *m_what;
void *m_where;
};
}
} // end namespace cereal
#endif
///////////////////////////////////////////////////////////////////////////
// Pool sizes
#ifndef CEREAL_RAPIDXML_STATIC_POOL_SIZE
// Size of static memory block of memory_pool.
// Define CEREAL_RAPIDXML_STATIC_POOL_SIZE before including rapidxml.hpp if you want to override the default value.
// No dynamic memory allocations are performed by memory_pool until static memory is exhausted.
#define CEREAL_RAPIDXML_STATIC_POOL_SIZE (64 * 1024)
#endif
#ifndef CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE
// Size of dynamic memory block of memory_pool.
// Define CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE before including rapidxml.hpp if you want to override the default value.
// After the static block is exhausted, dynamic blocks with approximately this size are allocated by memory_pool.
#define CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE (64 * 1024)
#endif
#ifndef CEREAL_RAPIDXML_ALIGNMENT
// Memory allocation alignment.
// Define CEREAL_RAPIDXML_ALIGNMENT before including rapidxml.hpp if you want to override the default value, which is the size of pointer.
// All memory allocations for nodes, attributes and strings will be aligned to this value.
// This must be a power of 2 and at least 1, otherwise memory_pool will not work.
#define CEREAL_RAPIDXML_ALIGNMENT sizeof(void *)
#endif
namespace cereal {
namespace rapidxml
{
// Forward declarations
template<class Ch> class xml_node;
template<class Ch> class xml_attribute;
template<class Ch> class xml_document;
//! Enumeration listing all node types produced by the parser.
//! Use xml_node::type() function to query node type.
enum node_type
{
node_document, //!< A document node. Name and value are empty.
node_element, //!< An element node. Name contains element name. Value contains text of first data node.
node_data, //!< A data node. Name is empty. Value contains data text.
node_cdata, //!< A CDATA node. Name is empty. Value contains data text.
node_comment, //!< A comment node. Name is empty. Value contains comment text.
node_declaration, //!< A declaration node. Name and value are empty. Declaration parameters (version, encoding and standalone) are in node attributes.
node_doctype, //!< A DOCTYPE node. Name is empty. Value contains DOCTYPE text.
node_pi //!< A PI node. Name contains target. Value contains instructions.
};
///////////////////////////////////////////////////////////////////////
// Parsing flags
//! Parse flag instructing the parser to not create data nodes.
//! Text of first data node will still be placed in value of parent element, unless rapidxml::parse_no_element_values flag is also specified.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_data_nodes = 0x1;
//! Parse flag instructing the parser to not use text of first data node as a value of parent element.
//! Can be combined with other flags by use of | operator.
//! Note that child data nodes of element node take precendence over its value when printing.
//! That is, if element has one or more child data nodes <em>and</em> a value, the value will be ignored.
//! Use rapidxml::parse_no_data_nodes flag to prevent creation of data nodes if you want to manipulate data using values of elements.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_element_values = 0x2;
//! Parse flag instructing the parser to not place zero terminators after strings in the source text.
//! By default zero terminators are placed, modifying source text.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_string_terminators = 0x4;
//! Parse flag instructing the parser to not translate entities in the source text.
//! By default entities are translated, modifying source text.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_entity_translation = 0x8;
//! Parse flag instructing the parser to disable UTF-8 handling and assume plain 8 bit characters.
//! By default, UTF-8 handling is enabled.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_no_utf8 = 0x10;
//! Parse flag instructing the parser to create XML declaration node.
//! By default, declaration node is not created.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_declaration_node = 0x20;
//! Parse flag instructing the parser to create comments nodes.
//! By default, comment nodes are not created.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_comment_nodes = 0x40;
//! Parse flag instructing the parser to create DOCTYPE node.
//! By default, doctype node is not created.
//! Although W3C specification allows at most one DOCTYPE node, RapidXml will silently accept documents with more than one.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_doctype_node = 0x80;
//! Parse flag instructing the parser to create PI nodes.
//! By default, PI nodes are not created.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_pi_nodes = 0x100;
//! Parse flag instructing the parser to validate closing tag names.
//! If not set, name inside closing tag is irrelevant to the parser.
//! By default, closing tags are not validated.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_validate_closing_tags = 0x200;
//! Parse flag instructing the parser to trim all leading and trailing whitespace of data nodes.
//! By default, whitespace is not trimmed.
//! This flag does not cause the parser to modify source text.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_trim_whitespace = 0x400;
//! Parse flag instructing the parser to condense all whitespace runs of data nodes to a single space character.
//! Trimming of leading and trailing whitespace of data is controlled by rapidxml::parse_trim_whitespace flag.
//! By default, whitespace is not normalized.
//! If this flag is specified, source text will be modified.
//! Can be combined with other flags by use of | operator.
//! <br><br>
//! See xml_document::parse() function.
const int parse_normalize_whitespace = 0x800;
// Compound flags
//! Parse flags which represent default behaviour of the parser.
//! This is always equal to 0, so that all other flags can be simply ored together.
//! Normally there is no need to inconveniently disable flags by anding with their negated (~) values.
//! This also means that meaning of each flag is a <i>negation</i> of the default setting.
//! For example, if flag name is rapidxml::parse_no_utf8, it means that utf-8 is <i>enabled</i> by default,
//! and using the flag will disable it.
//! <br><br>
//! See xml_document::parse() function.
const int parse_default = 0;
//! A combination of parse flags that forbids any modifications of the source text.
//! This also results in faster parsing. However, note that the following will occur:
//! <ul>
//! <li>names and values of nodes will not be zero terminated, you have to use xml_base::name_size() and xml_base::value_size() functions to determine where name and value ends</li>
//! <li>entities will not be translated</li>
//! <li>whitespace will not be normalized</li>
//! </ul>
//! See xml_document::parse() function.
const int parse_non_destructive = parse_no_string_terminators | parse_no_entity_translation;
//! A combination of parse flags resulting in fastest possible parsing, without sacrificing important data.
//! <br><br>
//! See xml_document::parse() function.
const int parse_fastest = parse_non_destructive | parse_no_data_nodes;
//! A combination of parse flags resulting in largest amount of data being extracted.
//! This usually results in slowest parsing.
//! <br><br>
//! See xml_document::parse() function.
const int parse_full = parse_declaration_node | parse_comment_nodes | parse_doctype_node | parse_pi_nodes | parse_validate_closing_tags;
///////////////////////////////////////////////////////////////////////
// Internals
//! \cond internal
namespace internal
{
// Struct that contains lookup tables for the parser
// It must be a template to allow correct linking (because it has static data members, which are defined in a header file).
template<int Dummy>
struct lookup_tables
{
static const unsigned char lookup_whitespace[256]; // Whitespace table
static const unsigned char lookup_node_name[256]; // Node name table
static const unsigned char lookup_text[256]; // Text table
static const unsigned char lookup_text_pure_no_ws[256]; // Text table
static const unsigned char lookup_text_pure_with_ws[256]; // Text table
static const unsigned char lookup_attribute_name[256]; // Attribute name table
static const unsigned char lookup_attribute_data_1[256]; // Attribute data table with single quote
static const unsigned char lookup_attribute_data_1_pure[256]; // Attribute data table with single quote
static const unsigned char lookup_attribute_data_2[256]; // Attribute data table with double quotes
static const unsigned char lookup_attribute_data_2_pure[256]; // Attribute data table with double quotes
static const unsigned char lookup_digits[256]; // Digits
static const unsigned char lookup_upcase[256]; // To uppercase conversion table for ASCII characters
};
// Find length of the string
template<class Ch>
inline std::size_t measure(const Ch *p)
{
const Ch *tmp = p;
while (*tmp)
++tmp;
return static_cast<std::size_t>(tmp - p);
}
// Compare strings for equality
template<class Ch>
inline bool compare(const Ch *p1, std::size_t size1, const Ch *p2, std::size_t size2, bool case_sensitive)
{
if (size1 != size2)
return false;
if (case_sensitive)
{
for (const Ch *end = p1 + size1; p1 < end; ++p1, ++p2)
if (*p1 != *p2)
return false;
}
else
{
for (const Ch *end = p1 + size1; p1 < end; ++p1, ++p2)
if (lookup_tables<0>::lookup_upcase[static_cast<unsigned char>(*p1)] != lookup_tables<0>::lookup_upcase[static_cast<unsigned char>(*p2)])
return false;
}
return true;
}
template<class Ch>
inline bool preserve_space(xml_node<Ch>* node)
{
const Ch preserve_value[] = { Ch('p'), Ch('r'), Ch('e'), Ch('s'), Ch('e'), Ch('r'), Ch('v'), Ch('e') };
const xml_attribute<Ch>* space = node->first_attribute("xml:space");
return space && internal::compare(space->value(), space->value_size(), preserve_value, sizeof(preserve_value) / sizeof(Ch), true);
}
}
//! \endcond
///////////////////////////////////////////////////////////////////////
// Memory pool
//! This class is used by the parser to create new nodes and attributes, without overheads of dynamic memory allocation.
//! In most cases, you will not need to use this class directly.
//! However, if you need to create nodes manually or modify names/values of nodes,
//! you are encouraged to use memory_pool of relevant xml_document to allocate the memory.
//! Not only is this faster than allocating them by using <code>new</code> operator,
//! but also their lifetime will be tied to the lifetime of document,
    //! possibly simplifying memory management.
//! <br><br>
//! Call allocate_node() or allocate_attribute() functions to obtain new nodes or attributes from the pool.
//! You can also call allocate_string() function to allocate strings.
//! Such strings can then be used as names or values of nodes without worrying about their lifetime.
//! Note that there is no <code>free()</code> function -- all allocations are freed at once when clear() function is called,
//! or when the pool is destroyed.
//! <br><br>
//! It is also possible to create a standalone memory_pool, and use it
//! to allocate nodes, whose lifetime will not be tied to any document.
//! <br><br>
//! Pool maintains <code>CEREAL_RAPIDXML_STATIC_POOL_SIZE</code> bytes of statically allocated memory.
//! Until static memory is exhausted, no dynamic memory allocations are done.
//! When static memory is exhausted, pool allocates additional blocks of memory of size <code>CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE</code> each,
//! by using global <code>new[]</code> and <code>delete[]</code> operators.
//! This behaviour can be changed by setting custom allocation routines.
//! Use set_allocator() function to set them.
//! <br><br>
//! Allocations for nodes, attributes and strings are aligned at <code>CEREAL_RAPIDXML_ALIGNMENT</code> bytes.
//! This value defaults to the size of pointer on target architecture.
//! <br><br>
//! To obtain absolutely top performance from the parser,
//! it is important that all nodes are allocated from a single, contiguous block of memory.
//! Otherwise, cache misses when jumping between two (or more) disjoint blocks of memory can slow down parsing quite considerably.
//! If required, you can tweak <code>CEREAL_RAPIDXML_STATIC_POOL_SIZE</code>, <code>CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE</code> and <code>CEREAL_RAPIDXML_ALIGNMENT</code>
//! to obtain best wasted memory to performance compromise.
//! To do it, define their values before rapidxml.hpp file is included.
//! \tparam Ch Character type of created nodes.
    template<class Ch = char>
    class memory_pool
    {
    public:
        //! \cond internal
        typedef void *(alloc_func)(std::size_t);       // Type of user-defined function used to allocate memory
        typedef void (free_func)(void *);              // Type of user-defined function used to free memory
        //! \endcond
        //! Constructs empty pool with default allocator functions.
        memory_pool()
            : m_alloc_func(0)
            , m_free_func(0)
        {
            init();
        }
        //! Destroys pool and frees all the memory.
        //! This causes memory occupied by nodes allocated by the pool to be freed.
        //! Nodes allocated from the pool are no longer valid.
        ~memory_pool()
        {
            clear();
        }
        //! Allocates a new node from the pool, and optionally assigns name and value to it.
        //! If the allocation request cannot be accommodated, this function will throw <code>std::bad_alloc</code>.
        //! If exceptions are disabled by defining CEREAL_RAPIDXML_NO_EXCEPTIONS, this function
        //! will call rapidxml::parse_error_handler() function.
        //! \param type Type of node to create.
        //! \param name Name to assign to the node, or 0 to assign no name.
        //! \param value Value to assign to the node, or 0 to assign no value.
        //! \param name_size Size of name to assign, or 0 to automatically calculate size from name string.
        //! \param value_size Size of value to assign, or 0 to automatically calculate size from value string.
        //! \return Pointer to allocated node. This pointer will never be NULL.
        xml_node<Ch> *allocate_node(node_type type,
                                    const Ch *name = 0, const Ch *value = 0,
                                    std::size_t name_size = 0, std::size_t value_size = 0)
        {
            // Placement-new into pool storage; the node is never deleted
            // individually -- its memory is reclaimed wholesale by clear().
            void *memory = allocate_aligned(sizeof(xml_node<Ch>));
            xml_node<Ch> *node = new(memory) xml_node<Ch>(type);
            if (name)
            {
                if (name_size > 0)
                    node->name(name, name_size);
                else
                    node->name(name);
            }
            if (value)
            {
                if (value_size > 0)
                    node->value(value, value_size);
                else
                    node->value(value);
            }
            return node;
        }
        //! Allocates a new attribute from the pool, and optionally assigns name and value to it.
        //! If the allocation request cannot be accommodated, this function will throw <code>std::bad_alloc</code>.
        //! If exceptions are disabled by defining CEREAL_RAPIDXML_NO_EXCEPTIONS, this function
        //! will call rapidxml::parse_error_handler() function.
        //! \param name Name to assign to the attribute, or 0 to assign no name.
        //! \param value Value to assign to the attribute, or 0 to assign no value.
        //! \param name_size Size of name to assign, or 0 to automatically calculate size from name string.
        //! \param value_size Size of value to assign, or 0 to automatically calculate size from value string.
        //! \return Pointer to allocated attribute. This pointer will never be NULL.
        xml_attribute<Ch> *allocate_attribute(const Ch *name = 0, const Ch *value = 0,
                                              std::size_t name_size = 0, std::size_t value_size = 0)
        {
            void *memory = allocate_aligned(sizeof(xml_attribute<Ch>));
            xml_attribute<Ch> *attribute = new(memory) xml_attribute<Ch>;
            if (name)
            {
                if (name_size > 0)
                    attribute->name(name, name_size);
                else
                    attribute->name(name);
            }
            if (value)
            {
                if (value_size > 0)
                    attribute->value(value, value_size);
                else
                    attribute->value(value);
            }
            return attribute;
        }
        //! Allocates a char array of given size from the pool, and optionally copies a given string to it.
        //! If the allocation request cannot be accommodated, this function will throw <code>std::bad_alloc</code>.
        //! If exceptions are disabled by defining CEREAL_RAPIDXML_NO_EXCEPTIONS, this function
        //! will call rapidxml::parse_error_handler() function.
        //! \param source String to initialize the allocated memory with, or 0 to not initialize it.
        //! \param size Number of characters to allocate, or zero to calculate it automatically from source string length; if size is 0, source string must be specified and null terminated.
        //! \return Pointer to allocated char array. This pointer will never be NULL.
        Ch *allocate_string(const Ch *source = 0, std::size_t size = 0)
        {
            assert(source || size); // Either source or size (or both) must be specified
            if (size == 0)
                size = internal::measure(source) + 1;   // +1 so the terminator is copied too
            Ch *result = static_cast<Ch *>(allocate_aligned(size * sizeof(Ch)));
            if (source)
                for (std::size_t i = 0; i < size; ++i)
                    result[i] = source[i];
            return result;
        }
        //! Clones an xml_node and its hierarchy of child nodes and attributes.
        //! Nodes and attributes are allocated from this memory pool.
        //! Names and values are not cloned, they are shared between the clone and the source.
        //! Result node can be optionally specified as a second parameter,
        //! in which case its contents will be replaced with cloned source node.
        //! This is useful when you want to clone entire document.
        //! \param source Node to clone.
        //! \param result Node to put results in, or 0 to automatically allocate result node
        //! \return Pointer to cloned node. This pointer will never be NULL.
        xml_node<Ch> *clone_node(const xml_node<Ch> *source, xml_node<Ch> *result = 0)
        {
            // Prepare result node
            if (result)
            {
                result->remove_all_attributes();
                result->remove_all_nodes();
                result->type(source->type());
            }
            else
                result = allocate_node(source->type());
            // Clone name and value (pointers are shared, not copied -- see above)
            result->name(source->name(), source->name_size());
            result->value(source->value(), source->value_size());
            // Clone child nodes and attributes (recursive for children)
            for (xml_node<Ch> *child = source->first_node(); child; child = child->next_sibling())
                result->append_node(clone_node(child));
            for (xml_attribute<Ch> *attr = source->first_attribute(); attr; attr = attr->next_attribute())
                result->append_attribute(allocate_attribute(attr->name(), attr->value(), attr->name_size(), attr->value_size()));
            return result;
        }
        //! Clears the pool.
        //! This causes memory occupied by nodes allocated by the pool to be freed.
        //! Any nodes or strings allocated from the pool will no longer be valid.
        //! Note: destructors of pool-allocated objects are never invoked; the
        //! dynamically allocated blocks are simply released in reverse order of
        //! allocation by walking the chain of block headers.
        void clear()
        {
            while (m_begin != m_static_memory)
            {
                // Each dynamic block starts (after alignment) with a header that
                // remembers where the previous block began.
                char *previous_begin = reinterpret_cast<header *>(align(m_begin))->previous_begin;
                if (m_free_func)
                    m_free_func(m_begin);
                else
                    delete[] m_begin;
                m_begin = previous_begin;
            }
            init();
        }
        //! Sets or resets the user-defined memory allocation functions for the pool.
        //! This can only be called when no memory is allocated from the pool yet, otherwise results are undefined.
        //! Allocation function must not return invalid pointer on failure. It should either throw,
        //! stop the program, or use <code>longjmp()</code> function to pass control to other place of program.
        //! If it returns invalid pointer, results are undefined.
        //! <br><br>
        //! User defined allocation functions must have the following forms:
        //! <br><code>
        //! <br>void *allocate(std::size_t size);
        //! <br>void free(void *pointer);
        //! </code><br>
        //! \param af  Allocation function, or 0 to restore default function
        //! \param ff  Free function, or 0 to restore default function
        void set_allocator(alloc_func *af, free_func *ff)
        {
            assert(m_begin == m_static_memory && m_ptr == align(m_begin));    // Verify that no memory is allocated yet
            m_alloc_func = af;
            m_free_func = ff;
        }
    private:
        // Prefix of every dynamically allocated block; links blocks into a
        // singly linked list so clear() can free them all.
        struct header
        {
            char *previous_begin;
        };
        // Resets the pool to use only the static buffer.
        void init()
        {
            m_begin = m_static_memory;
            m_ptr = align(m_begin);
            m_end = m_static_memory + sizeof(m_static_memory);
        }
        // Rounds ptr up to the next CEREAL_RAPIDXML_ALIGNMENT boundary.
        // The mask trick relies on the alignment being a power of 2
        // (documented requirement of CEREAL_RAPIDXML_ALIGNMENT).
        char *align(char *ptr)
        {
            std::size_t alignment = ((CEREAL_RAPIDXML_ALIGNMENT - (std::size_t(ptr) & (CEREAL_RAPIDXML_ALIGNMENT - 1))) & (CEREAL_RAPIDXML_ALIGNMENT - 1));
            return ptr + alignment;
        }
        // Obtains a raw block of memory from the user allocator or operator new[].
        char *allocate_raw(std::size_t size)
        {
            // Allocate
            void *memory;
            if (m_alloc_func)   // Allocate memory using either user-specified allocation function or global operator new[]
            {
                memory = m_alloc_func(size);
                assert(memory); // Allocator is not allowed to return 0, on failure it must either throw, stop the program or use longjmp
            }
            else
            {
                memory = new char[size];
#ifdef CEREAL_RAPIDXML_NO_EXCEPTIONS
                if (!memory)            // If exceptions are disabled, verify memory allocation, because new will not be able to throw bad_alloc
                    CEREAL_RAPIDXML_PARSE_ERROR("out of memory", 0);
#endif
            }
            return static_cast<char *>(memory);
        }
        // Carves an aligned chunk of the requested size out of the current pool,
        // growing the pool with a new dynamic block when it runs out of space.
        void *allocate_aligned(std::size_t size)
        {
            // Calculate aligned pointer
            char *result = align(m_ptr);
            // If not enough memory left in current pool, allocate a new pool
            if (result + size > m_end)
            {
                // Calculate required pool size (may be bigger than CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE)
                std::size_t pool_size = CEREAL_RAPIDXML_DYNAMIC_POOL_SIZE;
                if (pool_size < size)
                    pool_size = size;
                // Allocate
                std::size_t alloc_size = sizeof(header) + (2 * CEREAL_RAPIDXML_ALIGNMENT - 2) + pool_size;     // 2 alignments required in worst case: one for header, one for actual allocation
                char *raw_memory = allocate_raw(alloc_size);
                // Setup new pool in allocated memory
                char *pool = align(raw_memory);
                header *new_header = reinterpret_cast<header *>(pool);
                new_header->previous_begin = m_begin;
                m_begin = raw_memory;
                m_ptr = pool + sizeof(header);
                m_end = raw_memory + alloc_size;
                // Calculate aligned pointer again using new pool
                result = align(m_ptr);
            }
            // Update pool and return aligned pointer
            m_ptr = result + size;
            return result;
        }
        char *m_begin;                                      // Start of raw memory making up current pool
        char *m_ptr;                                        // First free byte in current pool
        char *m_end;                                        // One past last available byte in current pool
        char m_static_memory[CEREAL_RAPIDXML_STATIC_POOL_SIZE];    // Static raw memory
        alloc_func *m_alloc_func;                           // Allocator function, or 0 if default is to be used
        free_func *m_free_func;                             // Free function, or 0 if default is to be used
    };
///////////////////////////////////////////////////////////////////////////
// XML base
//! Base class for xml_node and xml_attribute implementing common functions:
//! name(), name_size(), value(), value_size() and parent().
//! \tparam Ch Character type to use
template<class Ch = char>
class xml_base
{
public:
///////////////////////////////////////////////////////////////////////////
// Construction & destruction
// Construct a base with empty name, value and parent
xml_base()
: m_name(0)
, m_value(0)
, m_parent(0)
{
}
///////////////////////////////////////////////////////////////////////////
// Node data access
//! Gets name of the node.
//! Interpretation of name depends on type of node.
//! Note that name will not be zero-terminated if rapidxml::parse_no_string_terminators option was selected during parse.
//! <br><br>
//! Use name_size() function to determine length of the name.
//! \return Name of node, or empty string if node has no name.
Ch *name() const
{
return m_name ? m_name : nullstr();
}
//! Gets size of node name, not including terminator character.
//! This function works correctly irrespective of whether name is or is not zero terminated.
//! \return Size of node name, in characters.
std::size_t name_size() const
{
return m_name ? m_name_size : 0;
}
//! Gets value of node.
//! Interpretation of value depends on type of node.
//! Note that value will not be zero-terminated if rapidxml::parse_no_string_terminators option was selected during parse.
//! <br><br>
//! Use value_size() function to determine length of the value.
//! \return Value of node, or empty string if node has no value.
Ch *value() const
{
return m_value ? m_value : nullstr();
}
//! Gets size of node value, not including terminator character.
//! This function works correctly irrespective of whether value is or is not zero terminated.
//! \return Size of node value, in characters.
std::size_t value_size() const
{
return m_value ? m_value_size : 0;
}
///////////////////////////////////////////////////////////////////////////
// Node modification
//! Sets name of node to a non zero-terminated string.
//! See \ref ownership_of_strings.
//! <br><br>
//! Note that node does not own its name or value, it only stores a pointer to it.
//! It will not delete or otherwise free the pointer on destruction.
//! It is reponsibility of the user to properly manage lifetime of the string.
//! The easiest way to achieve it is to use memory_pool of the document to allocate the string -
//! on destruction of the document the string will be automatically freed.
//! <br><br>
//! Size of name must be specified separately, because name does not have to be zero terminated.
//! Use name(const Ch *) function to have the length automatically calculated (string must be zero terminated).
//! \param name_ Name of node to set. Does not have to be zero terminated.
//! \param size Size of name, in characters. This does not include zero terminator, if one is present.
void name(const Ch *name_, std::size_t size)
{
m_name = const_cast<Ch *>(name_);
m_name_size = size;
}
//! Sets name of node to a zero-terminated string.
//! See also \ref ownership_of_strings and xml_node::name(const Ch *, std::size_t).
//! \param name_ Name of node to set. Must be zero terminated.
void name(const Ch *name_)
{
this->name(name_, internal::measure(name_));
}
//! Sets value of node to a non zero-terminated string.
//! See \ref ownership_of_strings.
//! <br><br>
//! Note that node does not own its name or value, it only stores a pointer to it.
//! It will not delete or otherwise free the pointer on destruction.
//! It is reponsibility of the user to properly manage lifetime of the string.
//! The easiest way to achieve it is to use memory_pool of the document to allocate the string -
//! on destruction of the document the string will be automatically freed.
//! <br><br>
//! Size of value must be specified separately, because it does not have to be zero terminated.
//! Use value(const Ch *) function to have the length automatically calculated (string must be zero terminated).
//! <br><br>
//! If an element has a child node of type node_data, it will take precedence over element value when printing.
//! If you want to manipulate data of elements using values, use parser flag rapidxml::parse_no_data_nodes to prevent creation of data nodes by the parser.
//! \param value_ value of node to set. Does not have to be zero terminated.
//! \param size Size of value, in characters. This does not include zero terminator, if one is present.
void value(const Ch *value_, std::size_t size)
{
m_value = const_cast<Ch *>(value_);
m_value_size = size;
}
//! Sets value of node to a zero-terminated string.
//! See also \ref ownership_of_strings and xml_node::value(const Ch *, std::size_t).
//! \param value_ Vame of node to set. Must be zero terminated.
void value(const Ch *value_)
{
this->value(value_, internal::measure(value_));
}
///////////////////////////////////////////////////////////////////////////
// Related nodes access
//! Gets node parent.
//! \return Pointer to parent node, or 0 if there is no parent.
xml_node<Ch> *parent() const
{
return m_parent;
}
protected:
// Return empty string
static Ch *nullstr()
{
static Ch zero = Ch('\0');
return &zero;
}
Ch *m_name; // Name of node, or 0 if no name
Ch *m_value; // Value of node, or 0 if no value
std::size_t m_name_size; // Length of node name, or undefined of no name
std::size_t m_value_size; // Length of node value, or undefined if no value
xml_node<Ch> *m_parent; // Pointer to parent node, or 0 if none
};
//! Class representing attribute node of XML document.
//! Each attribute has name and value strings, which are available through name() and value() functions (inherited from xml_base).
//! Note that after parse, both name and value of attribute will point to interior of source text used for parsing.
//! Thus, this text must persist in memory for the lifetime of attribute.
//! \tparam Ch Character type to use.
template<class Ch = char>
class xml_attribute: public xml_base<Ch>
{
friend class xml_node<Ch>;
public:
///////////////////////////////////////////////////////////////////////////
// Construction & destruction
//! Constructs an empty attribute with the specified type.
//! Consider using memory_pool of appropriate xml_document if allocating attributes manually.
xml_attribute()
{
}
///////////////////////////////////////////////////////////////////////////
// Related nodes access
//! Gets document of which attribute is a child.
//! \return Pointer to document that contains this attribute, or 0 if there is no parent document.
xml_document<Ch> *document() const
{
if (xml_node<Ch> *node = this->parent())
{
while (node->parent())
node = node->parent();
return node->type() == node_document ? static_cast<xml_document<Ch> *>(node) : 0;
}
else
return 0;
}
//! Gets previous attribute, optionally matching attribute name.
//! \param name Name of attribute to find, or 0 to return previous attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *previous_attribute(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_attribute<Ch> *attribute = m_prev_attribute; attribute; attribute = attribute->m_prev_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name, name_size, case_sensitive))
return attribute;
return 0;
}
else
return this->m_parent ? m_prev_attribute : 0;
}
//! Gets next attribute, optionally matching attribute name.
//! \param name_ Name of attribute to find, or 0 to return next attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size_ Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *next_attribute(const Ch *name_ = 0, std::size_t name_size_ = 0, bool case_sensitive = true) const
{
if (name_)
{
if (name_size_ == 0)
name_size_ = internal::measure(name_);
for (xml_attribute<Ch> *attribute = m_next_attribute; attribute; attribute = attribute->m_next_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name_, name_size_, case_sensitive))
return attribute;
return 0;
}
else
return this->m_parent ? m_next_attribute : 0;
}
private:
xml_attribute<Ch> *m_prev_attribute; // Pointer to previous sibling of attribute, or 0 if none; only valid if parent is non-zero
xml_attribute<Ch> *m_next_attribute; // Pointer to next sibling of attribute, or 0 if none; only valid if parent is non-zero
};
///////////////////////////////////////////////////////////////////////////
// XML node
//! Class representing a node of XML document.
//! Each node may have associated name and value strings, which are available through name() and value() functions.
//! Interpretation of name and value depends on type of the node.
//! Type of node can be determined by using type() function.
//! <br><br>
    //! Note that after parse, both name and value of node, if any, will point to interior of source text used for parsing.
//! Thus, this text must persist in the memory for the lifetime of node.
//! \tparam Ch Character type to use.
template<class Ch = char>
class xml_node: public xml_base<Ch>
{
public:
///////////////////////////////////////////////////////////////////////////
// Construction & destruction
//! Constructs an empty node with the specified type.
//! Consider using memory_pool of appropriate document to allocate nodes manually.
//! \param type_ Type of node to construct.
        xml_node(node_type type_)
            : m_type(type_)             // node kind, e.g. node_element or node_data
            , m_first_node(0)           // no children yet
            , m_first_attribute(0)      // no attributes yet
        {
        }
///////////////////////////////////////////////////////////////////////////
// Node data access
//! Gets type of node.
//! \return Type of node.
        node_type type() const
        {
            return m_type;      // current node kind (set by the constructor; see also the type setter)
        }
///////////////////////////////////////////////////////////////////////////
// Related nodes access
//! Gets document of which node is a child.
//! \return Pointer to document that contains this node, or 0 if there is no parent document.
xml_document<Ch> *document() const
{
xml_node<Ch> *node = const_cast<xml_node<Ch> *>(this);
while (node->parent())
node = node->parent();
return node->type() == node_document ? static_cast<xml_document<Ch> *>(node) : 0;
}
//! Gets first child node, optionally matching node name.
//! \param name_ Name of child to find, or 0 to return first child regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size_ Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found child, or 0 if not found.
xml_node<Ch> *first_node(const Ch *name_ = 0, std::size_t name_size_ = 0, bool case_sensitive = true) const
{
if (name_)
{
if (name_size_ == 0)
name_size_ = internal::measure(name_);
for (xml_node<Ch> *child = m_first_node; child; child = child->next_sibling())
if (internal::compare(child->name(), child->name_size(), name_, name_size_, case_sensitive))
return child;
return 0;
}
else
return m_first_node;
}
//! Gets last child node, optionally matching node name.
//! Behaviour is undefined if node has no children.
//! Use first_node() to test if node has children.
//! \param name Name of child to find, or 0 to return last child regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found child, or 0 if not found.
xml_node<Ch> *last_node(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
assert(m_first_node); // Cannot query for last child if node has no children
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_node<Ch> *child = m_last_node; child; child = child->previous_sibling())
if (internal::compare(child->name(), child->name_size(), name, name_size, case_sensitive))
return child;
return 0;
}
else
return m_last_node;
}
//! Gets previous sibling node, optionally matching node name.
//! Behaviour is undefined if node has no parent.
//! Use parent() to test if node has a parent.
//! \param name Name of sibling to find, or 0 to return previous sibling regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found sibling, or 0 if not found.
xml_node<Ch> *previous_sibling(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
assert(this->m_parent); // Cannot query for siblings if node has no parent
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_node<Ch> *sibling = m_prev_sibling; sibling; sibling = sibling->m_prev_sibling)
if (internal::compare(sibling->name(), sibling->name_size(), name, name_size, case_sensitive))
return sibling;
return 0;
}
else
return m_prev_sibling;
}
//! Gets next sibling node, optionally matching node name.
//! Behaviour is undefined if node has no parent.
//! Use parent() to test if node has a parent.
//! \param name_ Name of sibling to find, or 0 to return next sibling regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size_ Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found sibling, or 0 if not found.
xml_node<Ch> *next_sibling(const Ch *name_ = 0, std::size_t name_size_ = 0, bool case_sensitive = true) const
{
assert(this->m_parent); // Cannot query for siblings if node has no parent
if (name_)
{
if (name_size_ == 0)
name_size_ = internal::measure(name_);
for (xml_node<Ch> *sibling = m_next_sibling; sibling; sibling = sibling->m_next_sibling)
if (internal::compare(sibling->name(), sibling->name_size(), name_, name_size_, case_sensitive))
return sibling;
return 0;
}
else
return m_next_sibling;
}
//! Gets first attribute of node, optionally matching attribute name.
//! \param name_ Name of attribute to find, or 0 to return first attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size_ Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *first_attribute(const Ch *name_ = 0, std::size_t name_size_ = 0, bool case_sensitive = true) const
{
if (name_)
{
if (name_size_ == 0)
name_size_ = internal::measure(name_);
for (xml_attribute<Ch> *attribute = m_first_attribute; attribute; attribute = attribute->m_next_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name_, name_size_, case_sensitive))
return attribute;
return 0;
}
else
return m_first_attribute;
}
//! Gets last attribute of node, optionally matching attribute name.
//! \param name Name of attribute to find, or 0 to return last attribute regardless of its name; this string doesn't have to be zero-terminated if name_size is non-zero
//! \param name_size Size of name, in characters, or 0 to have size calculated automatically from string
//! \param case_sensitive Should name comparison be case-sensitive; non case-sensitive comparison works properly only for ASCII characters
//! \return Pointer to found attribute, or 0 if not found.
xml_attribute<Ch> *last_attribute(const Ch *name = 0, std::size_t name_size = 0, bool case_sensitive = true) const
{
if (name)
{
if (name_size == 0)
name_size = internal::measure(name);
for (xml_attribute<Ch> *attribute = m_last_attribute; attribute; attribute = attribute->m_prev_attribute)
if (internal::compare(attribute->name(), attribute->name_size(), name, name_size, case_sensitive))
return attribute;
return 0;
}
else
return m_first_attribute ? m_last_attribute : 0;
}
///////////////////////////////////////////////////////////////////////////
// Node modification
//! Sets type of node.
//! \param type_ Type of node to set.
void type(node_type type_)
{
m_type = type_;
}
///////////////////////////////////////////////////////////////////////////
// Node manipulation
//! Prepends a new child node.
//! The prepended child becomes the first child, and all existing children are moved one position back.
//! \param child Node to prepend.
void prepend_node(xml_node<Ch> *child)
{
assert(child && !child->parent() && child->type() != node_document);
if (first_node())
{
child->m_next_sibling = m_first_node;
m_first_node->m_prev_sibling = child;
}
else
{
child->m_next_sibling = 0;
m_last_node = child;
}
m_first_node = child;
child->m_parent = this;
child->m_prev_sibling = 0;
}
//! Appends a new child node.
//! The appended child becomes the last child.
//! \param child Node to append.
void append_node(xml_node<Ch> *child)
{
assert(child && !child->parent() && child->type() != node_document);
if (first_node())
{
child->m_prev_sibling = m_last_node;
m_last_node->m_next_sibling = child;
}
else
{
child->m_prev_sibling = 0;
m_first_node = child;
}
m_last_node = child;
child->m_parent = this;
child->m_next_sibling = 0;
}
//! Inserts a new child node at specified place inside the node.
//! All children after and including the specified node are moved one position back.
//! \param where Place where to insert the child, or 0 to insert at the back.
//! \param child Node to insert.
void insert_node(xml_node<Ch> *where, xml_node<Ch> *child)
{
assert(!where || where->parent() == this);
assert(child && !child->parent() && child->type() != node_document);
if (where == m_first_node)
prepend_node(child);
else if (where == 0)
append_node(child);
else
{
child->m_prev_sibling = where->m_prev_sibling;
child->m_next_sibling = where;
where->m_prev_sibling->m_next_sibling = child;
where->m_prev_sibling = child;
child->m_parent = this;
}
}
//! Removes first child node.
//! If node has no children, behaviour is undefined.
//! Use first_node() to test if node has children.
void remove_first_node()
{
assert(first_node());
xml_node<Ch> *child = m_first_node;
m_first_node = child->m_next_sibling;
if (child->m_next_sibling)
child->m_next_sibling->m_prev_sibling = 0;
else
m_last_node = 0;
child->m_parent = 0;
}
//! Removes last child of the node.
//! If node has no children, behaviour is undefined.
//! Use first_node() to test if node has children.
void remove_last_node()
{
assert(first_node());
xml_node<Ch> *child = m_last_node;
if (child->m_prev_sibling)
{
m_last_node = child->m_prev_sibling;
child->m_prev_sibling->m_next_sibling = 0;
}
else
m_first_node = 0;
child->m_parent = 0;
}
//! Removes specified child from the node
// \param where Pointer to child to be removed.
void remove_node(xml_node<Ch> *where)
{
assert(where && where->parent() == this);
assert(first_node());
if (where == m_first_node)
remove_first_node();
else if (where == m_last_node)
remove_last_node();
else
{
where->m_prev_sibling->m_next_sibling = where->m_next_sibling;
where->m_next_sibling->m_prev_sibling = where->m_prev_sibling;
where->m_parent = 0;
}
}
//! Removes all child nodes (but not attributes).
void remove_all_nodes()
{
for (xml_node<Ch> *node = first_node(); node; node = node->m_next_sibling)
node->m_parent = 0;
m_first_node = 0;
}
//! Prepends a new attribute to the node.
//! \param attribute Attribute to prepend.
void prepend_attribute(xml_attribute<Ch> *attribute)
{
assert(attribute && !attribute->parent());
if (first_attribute())
{
attribute->m_next_attribute = m_first_attribute;
m_first_attribute->m_prev_attribute = attribute;
}
else
{
attribute->m_next_attribute = 0;
m_last_attribute = attribute;
}
m_first_attribute = attribute;
attribute->m_parent = this;
attribute->m_prev_attribute = 0;
}
//! Appends a new attribute to the node.
//! \param attribute Attribute to append.
void append_attribute(xml_attribute<Ch> *attribute)
{
assert(attribute && !attribute->parent());
if (first_attribute())
{
attribute->m_prev_attribute = m_last_attribute;
m_last_attribute->m_next_attribute = attribute;
}
else
{
attribute->m_prev_attribute = 0;
m_first_attribute = attribute;
}
m_last_attribute = attribute;
attribute->m_parent = this;
attribute->m_next_attribute = 0;
}
//! Inserts a new attribute at specified place inside the node.
//! All attributes after and including the specified attribute are moved one position back.
//! \param where Place where to insert the attribute, or 0 to insert at the back.
//! \param attribute Attribute to insert.
void insert_attribute(xml_attribute<Ch> *where, xml_attribute<Ch> *attribute)
{
assert(!where || where->parent() == this);
assert(attribute && !attribute->parent());
if (where == m_first_attribute)
prepend_attribute(attribute);
else if (where == 0)
append_attribute(attribute);
else
{
attribute->m_prev_attribute = where->m_prev_attribute;
attribute->m_next_attribute = where;
where->m_prev_attribute->m_next_attribute = attribute;
where->m_prev_attribute = attribute;
attribute->m_parent = this;
}
}
//! Removes first attribute of the node.
//! If node has no attributes, behaviour is undefined.
//! Use first_attribute() to test if node has attributes.
void remove_first_attribute()
{
assert(first_attribute());
xml_attribute<Ch> *attribute = m_first_attribute;
if (attribute->m_next_attribute)
{
attribute->m_next_attribute->m_prev_attribute = 0;
}
else
m_last_attribute = 0;
attribute->m_parent = 0;
m_first_attribute = attribute->m_next_attribute;
}
//! Removes last attribute of the node.
//! If node has no attributes, behaviour is undefined.
//! Use first_attribute() to test if node has attributes.
void remove_last_attribute()
{
assert(first_attribute());
xml_attribute<Ch> *attribute = m_last_attribute;
if (attribute->m_prev_attribute)
{
attribute->m_prev_attribute->m_next_attribute = 0;
m_last_attribute = attribute->m_prev_attribute;
}
else
m_first_attribute = 0;
attribute->m_parent = 0;
}
//! Removes specified attribute from node.
//! \param where Pointer to attribute to be removed.
void remove_attribute(xml_attribute<Ch> *where)
{
assert(first_attribute() && where->parent() == this);
if (where == m_first_attribute)
remove_first_attribute();
else if (where == m_last_attribute)
remove_last_attribute();
else
{
where->m_prev_attribute->m_next_attribute = where->m_next_attribute;
where->m_next_attribute->m_prev_attribute = where->m_prev_attribute;
where->m_parent = 0;
}
}
//! Removes all attributes of node.
void remove_all_attributes()
{
for (xml_attribute<Ch> *attribute = first_attribute(); attribute; attribute = attribute->m_next_attribute)
attribute->m_parent = 0;
m_first_attribute = 0;
}
private:
///////////////////////////////////////////////////////////////////////////
// Restrictions
// No copying
xml_node(const xml_node &);
void operator =(const xml_node &);
///////////////////////////////////////////////////////////////////////////
// Data members
// Note that some of the pointers below have UNDEFINED values if certain other pointers are 0.
// This is required for maximum performance, as it allows the parser to omit initialization of
// unneded/redundant values.
//
// The rules are as follows:
// 1. first_node and first_attribute contain valid pointers, or 0 if node has no children/attributes respectively
// 2. last_node and last_attribute are valid only if node has at least one child/attribute respectively, otherwise they contain garbage
// 3. prev_sibling and next_sibling are valid only if node has a parent, otherwise they contain garbage
node_type m_type; // Type of node; always valid
xml_node<Ch> *m_first_node; // Pointer to first child node, or 0 if none; always valid
xml_node<Ch> *m_last_node; // Pointer to last child node, or 0 if none; this value is only valid if m_first_node is non-zero
xml_attribute<Ch> *m_first_attribute; // Pointer to first attribute of node, or 0 if none; always valid
xml_attribute<Ch> *m_last_attribute; // Pointer to last attribute of node, or 0 if none; this value is only valid if m_first_attribute is non-zero
xml_node<Ch> *m_prev_sibling; // Pointer to previous sibling of node, or 0 if none; this value is only valid if m_parent is non-zero
xml_node<Ch> *m_next_sibling; // Pointer to next sibling of node, or 0 if none; this value is only valid if m_parent is non-zero
};
///////////////////////////////////////////////////////////////////////////
// XML document
//! This class represents root of the DOM hierarchy.
//! It is also an xml_node and a memory_pool through public inheritance.
//! Use parse() function to build a DOM tree from a zero-terminated XML text string.
//! parse() function allocates memory for nodes and attributes by using functions of xml_document,
//! which are inherited from memory_pool.
//! To access root node of the document, use the document itself, as if it was an xml_node.
//! \tparam Ch Character type to use.
template<class Ch = char>
class xml_document: public xml_node<Ch>, public memory_pool<Ch>
{
public:
    //! Constructs empty XML document.
    //! The document itself acts as the root xml_node (type node_document).
    xml_document()
        : xml_node<Ch>(node_document)
    {
    }
//! Parses zero-terminated XML string according to given flags.
//! Passed string will be modified by the parser, unless rapidxml::parse_non_destructive flag is used.
//! The string must persist for the lifetime of the document.
//! In case of error, rapidxml::parse_error exception will be thrown.
//! <br><br>
//! If you want to parse contents of a file, you must first load the file into the memory, and pass pointer to its beginning.
//! Make sure that data is zero-terminated.
//! <br><br>
//! Document can be parsed into multiple times.
//! Each new call to parse removes previous nodes and attributes (if any), but does not clear memory pool.
//! \param text XML data to parse; pointer is non-const to denote fact that this data may be modified by the parser.
template<int Flags>
void parse(Ch *text)
{
assert(text);
// Remove current contents
this->remove_all_nodes();
this->remove_all_attributes();
// Parse BOM, if any
parse_bom<Flags>(text);
// Parse children
while (1)
{
// Skip whitespace before node
skip<whitespace_pred, Flags>(text);
if (*text == 0)
break;
// Parse and append new child
if (*text == Ch('<'))
{
++text; // Skip '<'
if (xml_node<Ch> *node = parse_node<Flags>(text))
this->append_node(node);
}
else
CEREAL_RAPIDXML_PARSE_ERROR("expected <", text);
}
}
//! Clears the document by deleting all nodes and clearing the memory pool.
//! All nodes owned by document pool are destroyed.
    //! Clears the document by deleting all nodes and clearing the memory pool.
    //! All nodes owned by document pool are destroyed.
    void clear()
    {
        // Detach the DOM first, then release all pool allocations in one go.
        this->remove_all_nodes();
        this->remove_all_attributes();
        memory_pool<Ch>::clear();
    }
private:
///////////////////////////////////////////////////////////////////////
// Internal character utility functions
// Detect whitespace character
struct whitespace_pred
{
static unsigned char test(Ch ch)
{
return internal::lookup_tables<0>::lookup_whitespace[static_cast<unsigned char>(ch)];
}
};
// Detect node name character
struct node_name_pred
{
static unsigned char test(Ch ch)
{
return internal::lookup_tables<0>::lookup_node_name[static_cast<unsigned char>(ch)];
}
};
// Detect attribute name character
struct attribute_name_pred
{
static unsigned char test(Ch ch)
{
return internal::lookup_tables<0>::lookup_attribute_name[static_cast<unsigned char>(ch)];
}
};
// Detect text character (PCDATA)
struct text_pred
{
static unsigned char test(Ch ch)
{
return internal::lookup_tables<0>::lookup_text[static_cast<unsigned char>(ch)];
}
};
// Detect text character (PCDATA) that does not require processing
struct text_pure_no_ws_pred
{
static unsigned char test(Ch ch)
{
return internal::lookup_tables<0>::lookup_text_pure_no_ws[static_cast<unsigned char>(ch)];
}
};
// Detect text character (PCDATA) that does not require processing
struct text_pure_with_ws_pred
{
static unsigned char test(Ch ch)
{
return internal::lookup_tables<0>::lookup_text_pure_with_ws[static_cast<unsigned char>(ch)];
}
};
// Detect attribute value character
template<Ch Quote>
struct attribute_value_pred
{
static unsigned char test(Ch ch)
{
if (Quote == Ch('\''))
return internal::lookup_tables<0>::lookup_attribute_data_1[static_cast<unsigned char>(ch)];
if (Quote == Ch('\"'))
return internal::lookup_tables<0>::lookup_attribute_data_2[static_cast<unsigned char>(ch)];
return 0; // Should never be executed, to avoid warnings on Comeau
}
};
// Detect attribute value character
template<Ch Quote>
struct attribute_value_pure_pred
{
static unsigned char test(Ch ch)
{
if (Quote == Ch('\''))
return internal::lookup_tables<0>::lookup_attribute_data_1_pure[static_cast<unsigned char>(ch)];
if (Quote == Ch('\"'))
return internal::lookup_tables<0>::lookup_attribute_data_2_pure[static_cast<unsigned char>(ch)];
return 0; // Should never be executed, to avoid warnings on Comeau
}
};
// Insert coded character, using UTF8 or 8-bit ASCII
template<int Flags>
static void insert_coded_character(Ch *&text, unsigned long code)
{
if (Flags & parse_no_utf8)
{
// Insert 8-bit ASCII character
// Todo: possibly verify that code is less than 256 and use replacement char otherwise?
text[0] = static_cast<Ch>(code);
text += 1;
}
else
{
// Insert UTF8 sequence
if (code < 0x80) // 1 byte sequence
{
text[0] = static_cast<Ch>(code);
text += 1;
}
else if (code < 0x800) // 2 byte sequence
{
text[1] = static_cast<Ch>((code | 0x80) & 0xBF); code >>= 6;
text[0] = static_cast<Ch>(code | 0xC0);
text += 2;
}
else if (code < 0x10000) // 3 byte sequence
{
text[2] = static_cast<Ch>((code | 0x80) & 0xBF); code >>= 6;
text[1] = static_cast<Ch>((code | 0x80) & 0xBF); code >>= 6;
text[0] = static_cast<Ch>(code | 0xE0);
text += 3;
}
else if (code < 0x110000) // 4 byte sequence
{
text[3] = static_cast<Ch>((code | 0x80) & 0xBF); code >>= 6;
text[2] = static_cast<Ch>((code | 0x80) & 0xBF); code >>= 6;
text[1] = static_cast<Ch>((code | 0x80) & 0xBF); code >>= 6;
text[0] = static_cast<Ch>(code | 0xF0);
text += 4;
}
else // Invalid, only codes up to 0x10FFFF are allowed in Unicode
{
CEREAL_RAPIDXML_PARSE_ERROR("invalid numeric character entity", text);
}
}
}
// Skip characters until predicate evaluates to true
template<class StopPred, int Flags>
static void skip(Ch *&text)
{
Ch *tmp = text;
while (StopPred::test(*tmp))
++tmp;
text = tmp;
}
// Skip characters until predicate evaluates to true while doing the following:
// - replacing XML character entity references with proper characters (' & " < > &#...;)
// - condensing whitespace sequences to single space character
    template<class StopPred, class StopPredPure, int Flags>
    static Ch *skip_and_expand_character_refs(Ch *&text, bool preserve_space)
    {
        // If entity translation, whitespace condense and whitespace trimming is disabled, use plain skip
        if (Flags & parse_no_entity_translation &&
            !(Flags & parse_normalize_whitespace) &&
            !(Flags & parse_trim_whitespace))
        {
            skip<StopPred, Flags>(text);
            return text;
        }

        // Use simple skip until first modification is detected
        skip<StopPredPure, Flags>(text);

        // Use translation skip: copy src -> dest in place, shrinking the text
        // whenever an entity reference or whitespace run is condensed.
        Ch *src = text;
        Ch *dest = src;
        while (StopPred::test(*src))
        {
            // If entity translation is enabled
            if (!(Flags & parse_no_entity_translation))
            {
                // Test if replacement is needed
                if (src[0] == Ch('&'))
                {
                    switch (src[1])
                    {

                    // &amp; &apos;
                    case Ch('a'):
                        if (src[2] == Ch('m') && src[3] == Ch('p') && src[4] == Ch(';'))
                        {
                            *dest = Ch('&');
                            ++dest;
                            src += 5;
                            continue;
                        }
                        if (src[2] == Ch('p') && src[3] == Ch('o') && src[4] == Ch('s') && src[5] == Ch(';'))
                        {
                            *dest = Ch('\'');
                            ++dest;
                            src += 6;
                            continue;
                        }
                        break;

                    // &quot;
                    case Ch('q'):
                        if (src[2] == Ch('u') && src[3] == Ch('o') && src[4] == Ch('t') && src[5] == Ch(';'))
                        {
                            *dest = Ch('"');
                            ++dest;
                            src += 6;
                            continue;
                        }
                        break;

                    // &gt;
                    case Ch('g'):
                        if (src[2] == Ch('t') && src[3] == Ch(';'))
                        {
                            *dest = Ch('>');
                            ++dest;
                            src += 4;
                            continue;
                        }
                        break;

                    // &lt;
                    case Ch('l'):
                        if (src[2] == Ch('t') && src[3] == Ch(';'))
                        {
                            *dest = Ch('<');
                            ++dest;
                            src += 4;
                            continue;
                        }
                        break;

                    // &#...; - numeric character reference; assumes ASCII digits
                    case Ch('#'):
                        if (src[2] == Ch('x'))
                        {
                            // Hexadecimal form &#xNN...;
                            unsigned long code = 0;
                            src += 3;   // Skip &#x
                            while (1)
                            {
                                unsigned char digit = internal::lookup_tables<0>::lookup_digits[static_cast<unsigned char>(*src)];
                                if (digit == 0xFF)
                                    break;
                                code = code * 16 + digit;
                                ++src;
                            }
                            insert_coded_character<Flags>(dest, code);    // Put character in output
                        }
                        else
                        {
                            // Decimal form &#NN...;
                            unsigned long code = 0;
                            src += 2;   // Skip &#
                            while (1)
                            {
                                unsigned char digit = internal::lookup_tables<0>::lookup_digits[static_cast<unsigned char>(*src)];
                                if (digit == 0xFF)
                                    break;
                                code = code * 10 + digit;
                                ++src;
                            }
                            insert_coded_character<Flags>(dest, code);    // Put character in output
                        }
                        if (*src == Ch(';'))
                            ++src;
                        else
                            CEREAL_RAPIDXML_PARSE_ERROR("expected ;", src);
                        continue;

                    // Something else
                    default:
                        // Ignore, just copy '&' verbatim
                        break;

                    }
                }
            }

            // If whitespace condensing is enabled
            if ((Flags & parse_normalize_whitespace) && !preserve_space)
            {
                // Test if condensing is needed
                if (whitespace_pred::test(*src))
                {
                    *dest = Ch(' '); ++dest;    // Put single space in dest
                    ++src;                      // Skip first whitespace char
                    // Skip remaining whitespace chars
                    while (whitespace_pred::test(*src))
                        ++src;
                    continue;
                }
            }

            // No replacement, only copy character
            *dest++ = *src++;

        }

        // Return new end: src is where scanning stopped, dest is the (possibly
        // shorter) end of the rewritten value.
        text = src;
        return dest;
    }
///////////////////////////////////////////////////////////////////////
// Internal parsing functions
// Parse BOM, if any
template<int Flags>
void parse_bom(Ch *&text)
{
// UTF-8?
if (static_cast<unsigned char>(text[0]) == 0xEF &&
static_cast<unsigned char>(text[1]) == 0xBB &&
static_cast<unsigned char>(text[2]) == 0xBF)
{
text += 3; // Skup utf-8 bom
}
}
// Parse XML declaration (<?xml...)
    template<int Flags>
    xml_node<Ch> *parse_xml_declaration(Ch *&text)
    {
        // If parsing of declaration is disabled, consume it without building a node
        if (!(Flags & parse_declaration_node))
        {
            // Skip until end of declaration
            while (text[0] != Ch('?') || text[1] != Ch('>'))
            {
                if (!text[0])
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            text += 2;    // Skip '?>'
            return 0;
        }

        // Create declaration
        xml_node<Ch> *declaration = this->allocate_node(node_declaration);

        // Skip whitespace before attributes or ?>
        skip<whitespace_pred, Flags>(text);

        // Parse declaration attributes (version, encoding, standalone, ...)
        parse_node_attributes<Flags>(text, declaration);

        // Skip ?>
        if (text[0] != Ch('?') || text[1] != Ch('>'))
            CEREAL_RAPIDXML_PARSE_ERROR("expected ?>", text);
        text += 2;

        return declaration;
    }
// Parse XML comment (<!--...)
    template<int Flags>
    xml_node<Ch> *parse_comment(Ch *&text)
    {
        // If parsing of comments is disabled, consume without building a node
        if (!(Flags & parse_comment_nodes))
        {
            // Skip until end of comment
            while (text[0] != Ch('-') || text[1] != Ch('-') || text[2] != Ch('>'))
            {
                if (!text[0])
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            text += 3;     // Skip '-->'
            return 0;      // Do not produce comment node
        }

        // Remember value start
        Ch *value_ = text;

        // Skip until end of comment
        while (text[0] != Ch('-') || text[1] != Ch('-') || text[2] != Ch('>'))
        {
            if (!text[0])
                CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
            ++text;
        }

        // Create comment node; its value is the text between <!-- and -->
        xml_node<Ch> *comment = this->allocate_node(node_comment);
        comment->value(value_, static_cast<std::size_t>(text - value_));

        // Place zero terminator after comment value (overwrites the first '-')
        if (!(Flags & parse_no_string_terminators))
            *text = Ch('\0');

        text += 3;     // Skip '-->'
        return comment;
    }
// Parse DOCTYPE
    template<int Flags>
    xml_node<Ch> *parse_doctype(Ch *&text)
    {
        // Remember value start
        Ch *value_ = text;

        // Skip to > (taking bracketed internal subsets into account)
        while (*text != Ch('>'))
        {
            // Determine character type
            switch (*text)
            {

            // If '[' encountered, scan for matching ending ']' using naive algorithm with depth
            // This works for all W3C test files except for 2 most wicked
            case Ch('['):
            {
                ++text;     // Skip '['
                int depth = 1;
                while (depth > 0)
                {
                    switch (*text)
                    {
                        case Ch('['): ++depth; break;
                        case Ch(']'): --depth; break;
                        case 0: CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                    }
                    ++text;
                }
                break;
            }

            // Error on end of text
            case Ch('\0'):
                CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);

            // Other character, skip it
            default:
                ++text;

            }
        }

        // If DOCTYPE nodes enabled, build a node whose value is everything after "<!DOCTYPE "
        if (Flags & parse_doctype_node)
        {
            // Create a new doctype node
            xml_node<Ch> *doctype = this->allocate_node(node_doctype);
            doctype->value(value_, static_cast<std::size_t>(text - value_));

            // Place zero terminator after value (overwrites the '>')
            if (!(Flags & parse_no_string_terminators))
                *text = Ch('\0');

            text += 1;      // skip '>'
            return doctype;
        }
        else
        {
            text += 1;      // skip '>'
            return 0;
        }
    }
// Parse PI
    template<int Flags>
    xml_node<Ch> *parse_pi(Ch *&text)
    {
        // If creation of PI nodes is enabled
        if (Flags & parse_pi_nodes)
        {
            // Create pi node
            xml_node<Ch> *pi = this->allocate_node(node_pi);

            // Extract PI target name
            Ch *name_ = text;
            skip<node_name_pred, Flags>(text);
            if (text == name_)
                CEREAL_RAPIDXML_PARSE_ERROR("expected PI target", text);
            pi->name(name_, static_cast<std::size_t>(text - name_));

            // Skip whitespace between pi target and pi
            skip<whitespace_pred, Flags>(text);

            // Remember start of pi
            Ch *value_ = text;

            // Skip to '?>'
            while (text[0] != Ch('?') || text[1] != Ch('>'))
            {
                if (*text == Ch('\0'))
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }

            // Set pi value (verbatim, no entity expansion or whitespace normalization)
            pi->value(value_, static_cast<std::size_t>(text - value_));

            // Place zero terminator after name and value
            if (!(Flags & parse_no_string_terminators))
            {
                pi->name()[pi->name_size()] = Ch('\0');
                pi->value()[pi->value_size()] = Ch('\0');
            }

            text += 2;    // Skip '?>'
            return pi;
        }
        else
        {
            // PI nodes disabled: consume up to and including '?>' without building a node
            while (text[0] != Ch('?') || text[1] != Ch('>'))
            {
                if (*text == Ch('\0'))
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            text += 2;    // Skip '?>'
            return 0;
        }
    }
// Parse and append data
// Return character that ends data.
// This is necessary because this character might have been overwritten by a terminating 0
    template<int Flags>
    Ch parse_and_append_data(xml_node<Ch> *node, Ch *&text, Ch *contents_start)
    {
        // Backup to contents start if whitespace trimming is disabled
        if (!(Flags & parse_trim_whitespace))
            text = contents_start;

        // assumes internal::preserve_space() honours xml:space="preserve" — confirm in xml_base/internal
        const bool preserve_space = internal::preserve_space(node);

        // Skip until end of data, expanding entities; 'end' may land before
        // 'text' when the value shrank during expansion/condensing.
        Ch *value_ = text, *end;
        if ((Flags & parse_normalize_whitespace) && !preserve_space)
            end = skip_and_expand_character_refs<text_pred, text_pure_with_ws_pred, Flags>(text, false);
        else
            end = skip_and_expand_character_refs<text_pred, text_pure_no_ws_pred, Flags>(text, preserve_space);

        // Trim trailing whitespace if flag is set; leading was already trimmed by whitespace skip after >
        if ((Flags & parse_trim_whitespace) && !preserve_space)
        {
            if (Flags & parse_normalize_whitespace)
            {
                // Whitespace is already condensed to single space characters by skipping function, so just trim 1 char off the end
                if (*(end - 1) == Ch(' '))
                    --end;
            }
            else
            {
                // Backup until non-whitespace character is found
                while (whitespace_pred::test(*(end - 1)))
                    --end;
            }
        }

        // Create new data node (unless data nodes are disabled)
        if (!(Flags & parse_no_data_nodes))
        {
            xml_node<Ch> *data = this->allocate_node(node_data);
            data->value(value_, static_cast<std::size_t>(end - value_));
            node->append_node(data);
        }

        // Add data to parent node value if no data exists yet
        if (!(Flags & parse_no_element_values))
            if (*node->value() == Ch('\0'))
                node->value(value_, static_cast<std::size_t>(end - value_));

        // Place zero terminator after value
        if (!(Flags & parse_no_string_terminators))
        {
            Ch ch = *text;
            *end = Ch('\0');
            return ch;      // Return character that ends data; this is required because zero terminator overwrote it
        }

        // Return character that ends data
        return *text;
    }
// Parse CDATA
    template<int Flags>
    xml_node<Ch> *parse_cdata(Ch *&text)
    {
        // If CDATA is disabled, consume without building a node
        if (Flags & parse_no_data_nodes)
        {
            // Skip until end of cdata
            while (text[0] != Ch(']') || text[1] != Ch(']') || text[2] != Ch('>'))
            {
                if (!text[0])
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            text += 3;      // Skip ]]>
            return 0;       // Do not produce CDATA node
        }

        // Skip until end of cdata; the value is taken verbatim (no expansion)
        Ch *value_ = text;
        while (text[0] != Ch(']') || text[1] != Ch(']') || text[2] != Ch('>'))
        {
            if (!text[0])
                CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
            ++text;
        }

        // Create new cdata node
        xml_node<Ch> *cdata = this->allocate_node(node_cdata);
        cdata->value(value_, static_cast<std::size_t>(text - value_));

        // Place zero terminator after value (overwrites the first ']')
        if (!(Flags & parse_no_string_terminators))
            *text = Ch('\0');

        text += 3;      // Skip ]]>
        return cdata;
    }
// Parse element node
    template<int Flags>
    xml_node<Ch> *parse_element(Ch *&text)
    {
        // Create element node
        xml_node<Ch> *element = this->allocate_node(node_element);

        // Extract element name
        Ch *name_ = text;
        skip<node_name_pred, Flags>(text);
        if (text == name_)
            CEREAL_RAPIDXML_PARSE_ERROR("expected element name", text);
        element->name(name_, static_cast<std::size_t>(text - name_));

        // Skip whitespace between element name and attributes or >
        skip<whitespace_pred, Flags>(text);

        // Parse attributes, if any
        parse_node_attributes<Flags>(text, element);

        // Determine ending type: '>' opens contents, '/>' is an empty element
        if (*text == Ch('>'))
        {
            ++text;
            parse_node_contents<Flags>(text, element);
        }
        else if (*text == Ch('/'))
        {
            ++text;
            if (*text != Ch('>'))
                CEREAL_RAPIDXML_PARSE_ERROR("expected >", text);
            ++text;
        }
        else
            CEREAL_RAPIDXML_PARSE_ERROR("expected >", text);

        // Place zero terminator after name (safe: the character after the name
        // was a whitespace/'>'/'/' already consumed above)
        if (!(Flags & parse_no_string_terminators))
            element->name()[element->name_size()] = Ch('\0');

        // Return parsed element
        return element;
    }
// Determine node type, and parse it
    template<int Flags>
    xml_node<Ch> *parse_node(Ch *&text)
    {
        // Dispatch on the character following '<' (already consumed by caller)
        switch (text[0])
        {

        // <... : anything that is not '?' or '!' starts an element
        default:
            // Parse and append element node
            return parse_element<Flags>(text);

        // <?...
        case Ch('?'):
            ++text;     // Skip ?
            if ((text[0] == Ch('x') || text[0] == Ch('X')) &&
                (text[1] == Ch('m') || text[1] == Ch('M')) &&
                (text[2] == Ch('l') || text[2] == Ch('L')) &&
                whitespace_pred::test(text[3]))
            {
                // '<?xml ' - xml declaration
                text += 4;      // Skip 'xml '
                return parse_xml_declaration<Flags>(text);
            }
            else
            {
                // Any other target is a processing instruction
                return parse_pi<Flags>(text);
            }

        // <!...
        case Ch('!'):

            // Parse proper subset of <! node
            switch (text[1])
            {

            // <!-
            case Ch('-'):
                if (text[2] == Ch('-'))
                {
                    // '<!--' - xml comment
                    text += 3;     // Skip '!--'
                    return parse_comment<Flags>(text);
                }
                break;

            // <![
            case Ch('['):
                if (text[2] == Ch('C') && text[3] == Ch('D') && text[4] == Ch('A') &&
                    text[5] == Ch('T') && text[6] == Ch('A') && text[7] == Ch('['))
                {
                    // '<![CDATA[' - cdata
                    text += 8;     // Skip '![CDATA['
                    return parse_cdata<Flags>(text);
                }
                break;

            // <!D
            case Ch('D'):
                if (text[2] == Ch('O') && text[3] == Ch('C') && text[4] == Ch('T') &&
                    text[5] == Ch('Y') && text[6] == Ch('P') && text[7] == Ch('E') &&
                    whitespace_pred::test(text[8]))
                {
                    // '<!DOCTYPE ' - doctype
                    text += 9;      // skip '!DOCTYPE '
                    return parse_doctype<Flags>(text);
                }

            }   // switch

            // Attempt to skip other, unrecognized node types starting with <!
            ++text;     // Skip !
            while (*text != Ch('>'))
            {
                if (*text == 0)
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                ++text;
            }
            ++text;     // Skip '>'
            return 0;   // No node recognized

        }
    }
        // Parse contents of the node - children, data etc.
        // Loops until the matching closing tag is consumed. Data (PCDATA) is
        // handled via `parse_and_append_data`, which may zero-terminate in
        // place; the `after_data_node` label re-enters the switch without
        // re-reading `*text`, since the terminator may have clobbered it.
        template<int Flags>
        void parse_node_contents(Ch *&text, xml_node<Ch> *node)
        {
            // For all children and text
            while (1)
            {
                // Skip whitespace between > and node contents
                Ch *contents_start = text;      // Store start of node contents before whitespace is skipped
                skip<whitespace_pred, Flags>(text);
                Ch next_char = *text;
            // After data nodes, instead of continuing the loop, control jumps here.
            // This is because zero termination inside parse_and_append_data() function
            // would wreak havoc with the above code.
            // Also, skipping whitespace after data nodes is unnecessary.
            after_data_node:
                // Determine what comes next: node closing, child node, data node, or 0?
                switch (next_char)
                {
                // Node closing or child node
                case Ch('<'):
                    if (text[1] == Ch('/'))
                    {
                        Ch *contents_end = 0;
                        // cereal extension: when xml:space="preserve" applies,
                        // capture the raw contents span (incl. whitespace) as
                        // the node value below.
                        if (internal::preserve_space(node))
                        {
                            contents_end = text;
                        }
                        // Node closing
                        text += 2;      // Skip '</'
                        if (Flags & parse_validate_closing_tags)
                        {
                            // Skip and validate closing tag name
                            Ch *closing_name = text;
                            skip<node_name_pred, Flags>(text);
                            if (!internal::compare(node->name(), node->name_size(), closing_name, static_cast<std::size_t>(text - closing_name), true))
                                CEREAL_RAPIDXML_PARSE_ERROR("invalid closing tag name", text);
                        }
                        else
                        {
                            // No validation, just skip name
                            skip<node_name_pred, Flags>(text);
                        }
                        // Skip remaining whitespace after node name
                        skip<whitespace_pred, Flags>(text);
                        if (*text != Ch('>'))
                            CEREAL_RAPIDXML_PARSE_ERROR("expected >", text);
                        ++text;     // Skip '>'
                        if (contents_end && contents_end != contents_start)
                        {
                            node->value(contents_start, static_cast<std::size_t>(contents_end - contents_start));
                            node->value()[node->value_size()] = Ch('\0');
                        }
                        return;     // Node closed, finished parsing contents
                    }
                    else
                    {
                        // Child node
                        ++text;     // Skip '<'
                        if (xml_node<Ch> *child = parse_node<Flags>(text))
                            node->append_node(child);
                    }
                    break;
                // End of data - error (PARSE_ERROR does not return)
                case Ch('\0'):
                    CEREAL_RAPIDXML_PARSE_ERROR("unexpected end of data", text);
                // Data node
                default:
                    next_char = parse_and_append_data<Flags>(node, text, contents_start);
                    goto after_data_node;   // Bypass regular processing after data nodes
                }
            }
        }
        // Parse XML attributes of the node.
        // Consumes zero or more name="value" (or name='value') pairs,
        // appending one xml_attribute per pair. Stops at the first character
        // that cannot start an attribute name (typically '>' or '/').
        template<int Flags>
        void parse_node_attributes(Ch *&text, xml_node<Ch> *node)
        {
            // For all attributes
            while (attribute_name_pred::test(*text))
            {
                // Extract attribute name
                Ch *name_ = text;
                ++text;     // Skip first character of attribute name
                skip<attribute_name_pred, Flags>(text);
                if (text == name_)
                    CEREAL_RAPIDXML_PARSE_ERROR("expected attribute name", name_);
                // Create new attribute
                xml_attribute<Ch> *attribute = this->allocate_attribute();
                attribute->name(name_, static_cast<std::size_t>(text - name_));
                node->append_attribute(attribute);
                // Skip whitespace after attribute name
                skip<whitespace_pred, Flags>(text);
                // Skip =
                if (*text != Ch('='))
                    CEREAL_RAPIDXML_PARSE_ERROR("expected =", text);
                ++text;
                // Add terminating zero after name (safe now: '=' already consumed)
                if (!(Flags & parse_no_string_terminators))
                    attribute->name()[attribute->name_size()] = 0;
                // Skip whitespace after =
                skip<whitespace_pred, Flags>(text);
                // Skip quote and remember if it was ' or "
                Ch quote = *text;
                if (quote != Ch('\'') && quote != Ch('"'))
                    CEREAL_RAPIDXML_PARSE_ERROR("expected ' or \"", text);
                ++text;
                // Extract attribute value and expand char refs in it
                Ch *value_ = text, *end;
                const int AttFlags = Flags & ~parse_normalize_whitespace;   // No whitespace normalization in attributes
                if (quote == Ch('\''))
                    end = skip_and_expand_character_refs<attribute_value_pred<Ch('\'')>, attribute_value_pure_pred<Ch('\'')>, AttFlags>(text, false);
                else
                    end = skip_and_expand_character_refs<attribute_value_pred<Ch('"')>, attribute_value_pure_pred<Ch('"')>, AttFlags>(text, false);
                // Set attribute value (may be shorter than the raw span if
                // character references were expanded in place)
                attribute->value(value_, static_cast<std::size_t>(end - value_));
                // Make sure that end quote is present
                if (*text != quote)
                    CEREAL_RAPIDXML_PARSE_ERROR("expected ' or \"", text);
                ++text;     // Skip quote
                // Add terminating zero after value
                if (!(Flags & parse_no_string_terminators))
                    attribute->value()[attribute->value_size()] = 0;
                // Skip whitespace after attribute value
                skip<whitespace_pred, Flags>(text);
            }
        }
};
//! \cond internal
    namespace internal
    {
        // Character-class lookup tables used by the parser's skip predicates.
        // Each table maps an (unsigned) character code to 1 if the character
        // belongs to the class, 0 otherwise, except lookup_digits and
        // lookup_upcase which map to values (see their comments).
        // Whitespace (space \n \r \t)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_whitespace[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  1,  0,  0,  1,  0,  0,  // 0
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 1
             1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 2
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 3
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 4
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 5
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 6
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 7
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 8
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // 9
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // A
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // B
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // C
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // D
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  // E
             0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0   // F
        };
        // Node name (anything but space \n \r \t / > ? \0)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_node_name[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  0,  0,  1,  1,  0,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  0,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Text (i.e. PCDATA) (anything but < \0)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_text[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Text (i.e. PCDATA) that does not require processing when ws normalization is disabled
        // (anything but < \0 &)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_text_pure_no_ws[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             1,  1,  1,  1,  1,  1,  0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Text (i.e. PCDATA) that does not require processing when ws normalization is enabled
        // (anything but < \0 & space \n \r \t)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_text_pure_with_ws[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  0,  0,  1,  1,  0,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             0,  1,  1,  1,  1,  1,  0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Attribute name (anything but space \n \r \t / < > = ? ! \0)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_attribute_name[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  0,  0,  1,  1,  0,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             0,  0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  0,  0,  0,  0,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Attribute data with single quote (anything but ' \0)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_attribute_data_1[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             1,  1,  1,  1,  1,  1,  1,  0,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Attribute data with single quote that does not require processing (anything but ' \0 &)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_attribute_data_1_pure[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             1,  1,  1,  1,  1,  1,  0,  0,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Attribute data with double quote (anything but " \0)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_attribute_data_2[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             1,  1,  0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Attribute data with double quote that does not require processing (anything but " \0 &)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_attribute_data_2_pure[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
             0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 0
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 1
             1,  1,  0,  1,  1,  1,  0,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 2
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 3
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 4
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 5
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 6
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 7
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 8
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // 9
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // A
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // B
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // C
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // D
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  // E
             1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1   // F
        };
        // Digits (dec and hex, 255 denotes end of numeric character reference)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_digits[256] =
        {
          // 0   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 0
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 1
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 2
             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,255,255,255,255,255,255,  // 3
           255, 10, 11, 12, 13, 14, 15,255,255,255,255,255,255,255,255,255,  // 4
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 5
           255, 10, 11, 12, 13, 14, 15,255,255,255,255,255,255,255,255,255,  // 6
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 7
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 8
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // 9
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // A
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // B
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // C
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // D
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  // E
           255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255   // F
        };
        // Upper case conversion (identity except for ASCII a-z -> A-Z)
        template<int Dummy>
        const unsigned char lookup_tables<Dummy>::lookup_upcase[256] =
        {
          // 0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  A   B   C   D   E   F
            0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,  // 0
            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,  // 1
            32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,  // 2
            48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,  // 3
            64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,  // 4
            80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,  // 5
            96, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,  // 6
            80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 123,124,125,126,127, // 7
            128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, // 8
            144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, // 9
            160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, // A
            176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, // B
            192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, // C
            208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, // D
            224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, // E
            240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255  // F
        };
    }
//! \endcond
}
} // end namespace cereal
// Undefine internal macros
#undef CEREAL_RAPIDXML_PARSE_ERROR
// On MSVC, restore warnings state
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/tc-schedule.sh | #!/bin/bash
# Fail fast on any error and echo commands for CI logs.
set -ex

# Directory containing this script, so companion files resolve regardless of CWD.
curdir=$(dirname "$0")/

# Install the decision task's Python dependencies into the user site.
pip3 install --quiet --user --upgrade pip
export PATH=$HOME/.local/bin/:$PATH
pip3 install --quiet --user --upgrade -r ${curdir}/tc-decision_reqs.txt

# First, perform dry run for push and pull request
# This should help us track merge failures in advance
for event in pull_request.opened pull_request.synchronize pull_request.reopened push;
do
  # Placeholder env values: a dry run only validates graph generation,
  # it does not submit tasks, so the real metadata is not needed.
  GITHUB_EVENT="${event}" \
  TASK_ID="aa" \
  GITHUB_HEAD_BRANCHORTAG="branchName" \
  GITHUB_HEAD_REF="refs/heads/branchName" \
  GITHUB_HEAD_SHA="branchName" \
  GITHUB_HEAD_REPO_URL="url" \
  GITHUB_HEAD_USER_LOGIN="user" \
  GITHUB_HEAD_USER_EMAIL="email" \
  python3 ${curdir}/tc-decision.py --dry
done;

# Dry run for the tag event as well.
GITHUB_EVENT="tag" \
TASK_ID="aa" \
GITHUB_HEAD_BRANCHORTAG="tagName" \
GITHUB_HEAD_REF="refs/tags/tagName" \
GITHUB_HEAD_SHA="branchName" \
GITHUB_HEAD_REPO_URL="url" \
GITHUB_HEAD_USER_LOGIN="user" \
GITHUB_HEAD_USER_EMAIL="email" \
python3 ${curdir}/tc-decision.py --dry

# Create a new env variable for usage in TaskCluster .yml files
export GITHUB_HEAD_BRANCHORTAG="${GITHUB_HEAD_BRANCH}${GITHUB_HEAD_TAG}"
# Quick hack because tc-decision uses GITHUB_HEAD_BRANCH
export GITHUB_HEAD_BRANCH="${GITHUB_HEAD_BRANCH}${GITHUB_HEAD_TAG}"

# Real (non-dry) run: generate and submit the task graph.
python3 ${curdir}/tc-decision.py
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/extensions/linear/linear-fst.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes for building, storing and representing log-linear models as FSTs.
#ifndef FST_EXTENSIONS_LINEAR_LINEAR_FST_H_
#define FST_EXTENSIONS_LINEAR_LINEAR_FST_H_
#include <algorithm>
#include <iostream>
#include <memory>
#include <vector>
#include <fst/compat.h>
#include <fst/log.h>
#include <fst/extensions/pdt/collection.h>
#include <fst/bi-table.h>
#include <fst/cache.h>
#include <fstream>
#include <fst/fst.h>
#include <fst/matcher.h>
#include <fst/symbol-table.h>
#include <fst/extensions/linear/linear-fst-data.h>
namespace fst {
// Forward declaration of the specialized matcher for both
// LinearTaggerFst and LinearClassifierFst.
template <class F>
class LinearFstMatcherTpl;
namespace internal {
// Implementation class for on-the-fly generated LinearTaggerFst with
// special optimization in matching.
template <class A>
class LinearTaggerFstImpl : public CacheImpl<A> {
 public:
  using FstImpl<A>::SetType;
  using FstImpl<A>::SetProperties;
  using FstImpl<A>::SetInputSymbols;
  using FstImpl<A>::SetOutputSymbols;
  using FstImpl<A>::WriteHeader;
  using CacheBaseImpl<CacheState<A>>::PushArc;
  using CacheBaseImpl<CacheState<A>>::HasArcs;
  using CacheBaseImpl<CacheState<A>>::HasFinal;
  using CacheBaseImpl<CacheState<A>>::HasStart;
  using CacheBaseImpl<CacheState<A>>::SetArcs;
  using CacheBaseImpl<CacheState<A>>::SetFinal;
  using CacheBaseImpl<CacheState<A>>::SetStart;
  typedef A Arc;
  typedef typename A::Label Label;
  typedef typename A::Weight Weight;
  typedef typename A::StateId StateId;
  typedef typename Collection<StateId, Label>::SetIterator NGramIterator;
  // Constructs an empty FST by default.
  LinearTaggerFstImpl()
      : CacheImpl<A>(CacheOptions()),
        data_(std::make_shared<LinearFstData<A>>()),
        delay_(0) {
    SetType("linear-tagger");
  }
  // Constructs the FST with given data storage and symbol
  // tables.
  //
  // TODO(wuke): when there is no constraint on output we can delay
  // less than `data->MaxFutureSize` positions.
  LinearTaggerFstImpl(const LinearFstData<Arc> *data, const SymbolTable *isyms,
                      const SymbolTable *osyms, CacheOptions opts)
      : CacheImpl<A>(opts), data_(data), delay_(data->MaxFutureSize()) {
    SetType("linear-tagger");
    SetProperties(kILabelSorted, kFstProperties);
    SetInputSymbols(isyms);
    SetOutputSymbols(osyms);
    ReserveStubSpace();
  }
  // Copy by sharing the underlying data storage.
  LinearTaggerFstImpl(const LinearTaggerFstImpl &impl)
      : CacheImpl<A>(impl), data_(impl.data_), delay_(impl.delay_) {
    SetType("linear-tagger");
    SetProperties(impl.Properties(), kCopyProperties);
    SetInputSymbols(impl.InputSymbols());
    SetOutputSymbols(impl.OutputSymbols());
    ReserveStubSpace();
  }
  // Lazily computes and caches the start state on first access.
  StateId Start() {
    if (!HasStart()) {
      StateId start = FindStartState();
      SetStart(start);
    }
    return CacheImpl<A>::Start();
  }
  // Lazily computes and caches the final weight of `s`. A state is
  // final only when its look-ahead buffer has been fully flushed.
  Weight Final(StateId s) {
    if (!HasFinal(s)) {
      state_stub_.clear();
      FillState(s, &state_stub_);
      if (CanBeFinal(state_stub_))
        SetFinal(s, data_->FinalWeight(InternalBegin(state_stub_),
                                       InternalEnd(state_stub_)));
      else
        SetFinal(s, Weight::Zero());
    }
    return CacheImpl<A>::Final(s);
  }
  size_t NumArcs(StateId s) {
    if (!HasArcs(s)) Expand(s);
    return CacheImpl<A>::NumArcs(s);
  }
  size_t NumInputEpsilons(StateId s) {
    if (!HasArcs(s)) Expand(s);
    return CacheImpl<A>::NumInputEpsilons(s);
  }
  size_t NumOutputEpsilons(StateId s) {
    if (!HasArcs(s)) Expand(s);
    return CacheImpl<A>::NumOutputEpsilons(s);
  }
  void InitArcIterator(StateId s, ArcIteratorData<A> *data) {
    if (!HasArcs(s)) Expand(s);
    CacheImpl<A>::InitArcIterator(s, data);
  }
  // Computes the outgoing transitions from a state, creating new
  // destination states as needed.
  void Expand(StateId s);
  // Appends to `arcs` all out-going arcs from state `s` that matches `label` as
  // the input label.
  void MatchInput(StateId s, Label ilabel, std::vector<Arc> *arcs);
  static LinearTaggerFstImpl *Read(std::istream &strm,
                                   const FstReadOptions &opts);
  // Serializes the FST header followed by the shared model data. The
  // header's start state is left unset (kNoStateId) since states are
  // generated on the fly.
  bool Write(std::ostream &strm,  // NOLINT
             const FstWriteOptions &opts) const {
    FstHeader header;
    header.SetStart(kNoStateId);
    WriteHeader(strm, opts, kFileVersion, &header);
    data_->Write(strm);
    if (!strm) {
      LOG(ERROR) << "LinearTaggerFst::Write: Write failed: " << opts.source;
      return false;
    }
    return true;
  }
 private:
  static const int kMinFileVersion;
  static const int kFileVersion;
  // A collection of functions to access parts of the state tuple. A
  // state tuple is a vector of `Label`s with two parts:
  // [buffer] [internal].
  //
  // - [buffer] is a buffer of observed input labels with length
  // `delay_`. `LinearFstData<A>::kStartOfSentence`
  // (resp. `LinearFstData<A>::kEndOfSentence`) are used as
  // paddings when the buffer has fewer than `delay_` elements, which
  // can only appear as the prefix (resp. suffix) of the buffer.
  //
  // - [internal] is the internal state tuple for `LinearFstData`
  typename std::vector<Label>::const_iterator BufferBegin(
      const std::vector<Label> &state) const {
    return state.begin();
  }
  typename std::vector<Label>::const_iterator BufferEnd(
      const std::vector<Label> &state) const {
    return state.begin() + delay_;
  }
  typename std::vector<Label>::const_iterator InternalBegin(
      const std::vector<Label> &state) const {
    return state.begin() + delay_;
  }
  typename std::vector<Label>::const_iterator InternalEnd(
      const std::vector<Label> &state) const {
    return state.end();
  }
  // The size of state tuples are fixed, reserve them in stubs
  void ReserveStubSpace() {
    state_stub_.reserve(delay_ + data_->NumGroups());
    next_stub_.reserve(delay_ + data_->NumGroups());
  }
  // Computes the start state tuple and maps it to the start state id.
  StateId FindStartState() {
    // Empty buffer with start-of-sentence paddings
    state_stub_.clear();
    state_stub_.resize(delay_, LinearFstData<A>::kStartOfSentence);
    // Append internal states
    data_->EncodeStartState(&state_stub_);
    return FindState(state_stub_);
  }
  // Tests whether the buffer in `(begin, end)` is empty.
  bool IsEmptyBuffer(typename std::vector<Label>::const_iterator begin,
                     typename std::vector<Label>::const_iterator end) const {
    // The following is guanranteed by `ShiftBuffer()`:
    // - buffer[i] == LinearFstData<A>::kEndOfSentence =>
    // buffer[i+x] == LinearFstData<A>::kEndOfSentence
    // - buffer[i] == LinearFstData<A>::kStartOfSentence =>
    // buffer[i-x] == LinearFstData<A>::kStartOfSentence
    return delay_ == 0 || *(end - 1) == LinearFstData<A>::kStartOfSentence ||
           *begin == LinearFstData<A>::kEndOfSentence;
  }
  // Tests whether the given state tuple can be a final state. A state
  // is final iff there is no observed input in the buffer.
  bool CanBeFinal(const std::vector<Label> &state) {
    return IsEmptyBuffer(BufferBegin(state), BufferEnd(state));
  }
  // Finds state corresponding to an n-gram. Creates new state if n-gram not
  // found.
  StateId FindState(const std::vector<Label> &ngram) {
    // Two-level mapping: tuple -> sparse id -> dense state id.
    StateId sparse = ngrams_.FindId(ngram, true);
    StateId dense = condensed_.FindId(sparse, true);
    return dense;
  }
  // Appends after `output` the state tuple corresponding to the state id. The
  // state id must exist.
  void FillState(StateId s, std::vector<Label> *output) {
    s = condensed_.FindEntry(s);
    for (NGramIterator it = ngrams_.FindSet(s); !it.Done(); it.Next()) {
      Label label = it.Element();
      output->push_back(label);
    }
  }
  // Shifts the buffer in `state` by appending `ilabel` and popping
  // the one in the front as the return value. `next_stub_` is a
  // shifted buffer of size `delay_` where the first `delay_ - 1`
  // elements are the last `delay_ - 1` elements in the buffer of
  // `state`. The last (if any) element in `next_stub_` will be
  // `ilabel` after the call returns.
  Label ShiftBuffer(const std::vector<Label> &state, Label ilabel,
                    std::vector<Label> *next_stub_);
  // Builds an arc from state tuple `state` consuming `ilabel` and
  // `olabel`. `next_stub_` is the buffer filled in `ShiftBuffer`.
  Arc MakeArc(const std::vector<Label> &state, Label ilabel, Label olabel,
              std::vector<Label> *next_stub_);
  // Expands arcs from state `s`, equivalent to state tuple `state`,
  // with input `ilabel`. `next_stub_` is the buffer filled in
  // `ShiftBuffer`.
  void ExpandArcs(StateId s, const std::vector<Label> &state, Label ilabel,
                  std::vector<Label> *next_stub_);
  // Appends arcs from state `s`, equivalent to state tuple `state`,
  // with input `ilabel` to `arcs`. `next_stub_` is the buffer filled
  // in `ShiftBuffer`.
  void AppendArcs(StateId s, const std::vector<Label> &state, Label ilabel,
                  std::vector<Label> *next_stub_, std::vector<Arc> *arcs);
  // Shared, immutable model parameters (features, weights, label sets).
  std::shared_ptr<const LinearFstData<A>> data_;
  // Length of the look-ahead buffer (number of delayed input positions).
  size_t delay_;
  // Mapping from internal state tuple to *non-consecutive* ids
  Collection<StateId, Label> ngrams_;
  // Mapping from non-consecutive id to actual state id
  CompactHashBiTable<StateId, StateId, std::hash<StateId>> condensed_;
  // Two frequently used vectors, reuse to avoid repeated heap
  // allocation
  std::vector<Label> state_stub_, next_stub_;
  LinearTaggerFstImpl &operator=(const LinearTaggerFstImpl &) = delete;
};
// Oldest on-disk format version Read() still accepts.
template <class A>
const int LinearTaggerFstImpl<A>::kMinFileVersion = 1;
// Format version written by Write().
template <class A>
const int LinearTaggerFstImpl<A>::kFileVersion = 1;
// Shifts the look-ahead buffer: records `ilabel` as the newest observation in
// the successor state's buffer and returns the oldest buffered label, which
// drops out of the delay window.
template <class A>
inline typename A::Label LinearTaggerFstImpl<A>::ShiftBuffer(
    const std::vector<Label> &state, Label ilabel,
    std::vector<Label> *next_stub_) {
  DCHECK(ilabel > 0 || ilabel == LinearFstData<A>::kEndOfSentence);
  if (delay_ == 0) {
    // No buffering at all: the observed label is consumed immediately.
    DCHECK_GT(ilabel, 0);
    return ilabel;
  }
  // The buffer occupies the first `delay_` slots of the tuple, so its last
  // slot is index delay_ - 1 (same position as BufferEnd - begin - 1).
  (*next_stub_)[delay_ - 1] = ilabel;
  // The front of the old buffer is what falls out of the window.
  return *BufferBegin(state);
}
// Builds the arc leaving state tuple `state` on (ilabel, olabel), scoring the
// transition with the model and interning the successor tuple as a state id.
// `next_stub_` holds the pre-shifted buffer (see ShiftBuffer) and is restored
// to buffer-only size (`delay_`) before returning so it can be reused.
template <class A>
inline A LinearTaggerFstImpl<A>::MakeArc(const std::vector<Label> &state,
                                         Label ilabel, Label olabel,
                                         std::vector<Label> *next_stub_) {
  DCHECK(ilabel > 0 || ilabel == LinearFstData<A>::kEndOfSentence);
  DCHECK(olabel > 0 || olabel == LinearFstData<A>::kStartOfSentence);
  Weight weight(Weight::One());
  // Fills in the successor's internal states and accumulates the weight.
  data_->TakeTransition(BufferEnd(state), InternalBegin(state),
                        InternalEnd(state), ilabel, olabel, next_stub_,
                        &weight);
  StateId nextstate = FindState(*next_stub_);
  // Restore `next_stub_` to its size before the call
  next_stub_->resize(delay_);
  // In the actual arc, we use epsilons instead of boundaries.
  return A(ilabel == LinearFstData<A>::kEndOfSentence ? 0 : ilabel,
           olabel == LinearFstData<A>::kStartOfSentence ? 0 : olabel, weight,
           nextstate);
}
// Pushes onto the cache all arcs from `s` that consume `ilabel`, one per
// output label the model allows for the input observed `delay_` steps back.
template <class A>
inline void LinearTaggerFstImpl<A>::ExpandArcs(StateId s,
                                               const std::vector<Label> &state,
                                               Label ilabel,
                                               std::vector<Label> *next_stub_) {
  // Input label to constrain the output with, observed `delay_` steps
  // back. `ilabel` is the input label to be put on the arc, which
  // fires features.
  Label obs_ilabel = ShiftBuffer(state, ilabel, next_stub_);
  if (obs_ilabel == LinearFstData<A>::kStartOfSentence) {
    // This happens when input is shorter than `delay_`.
    PushArc(s, MakeArc(state, ilabel, LinearFstData<A>::kStartOfSentence,
                       next_stub_));
  } else {
    // One arc per output label compatible with the observed input.
    std::pair<typename std::vector<typename A::Label>::const_iterator,
              typename std::vector<typename A::Label>::const_iterator> range =
        data_->PossibleOutputLabels(obs_ilabel);
    for (typename std::vector<typename A::Label>::const_iterator it =
             range.first;
         it != range.second; ++it)
      PushArc(s, MakeArc(state, ilabel, *it, next_stub_));
  }
}
// TODO(wuke): this has much in duplicate with `ExpandArcs()`
// Same arc generation as ExpandArcs(), but appends the arcs to `arcs`
// instead of pushing them into the state cache (used by MatchInput()).
template <class A>
inline void LinearTaggerFstImpl<A>::AppendArcs(StateId /*s*/,
                                               const std::vector<Label> &state,
                                               Label ilabel,
                                               std::vector<Label> *next_stub_,
                                               std::vector<Arc> *arcs) {
  // Input label to constrain the output with, observed `delay_` steps
  // back. `ilabel` is the input label to be put on the arc, which
  // fires features.
  Label obs_ilabel = ShiftBuffer(state, ilabel, next_stub_);
  if (obs_ilabel == LinearFstData<A>::kStartOfSentence) {
    // This happens when input is shorter than `delay_`.
    arcs->push_back(
        MakeArc(state, ilabel, LinearFstData<A>::kStartOfSentence, next_stub_));
  } else {
    // One arc per output label compatible with the observed input.
    std::pair<typename std::vector<typename A::Label>::const_iterator,
              typename std::vector<typename A::Label>::const_iterator> range =
        data_->PossibleOutputLabels(obs_ilabel);
    for (typename std::vector<typename A::Label>::const_iterator it =
             range.first;
         it != range.second; ++it)
      arcs->push_back(MakeArc(state, ilabel, *it, next_stub_));
  }
}
// Generates and caches every outgoing arc of state `s`: an epsilon "flush"
// arc while the buffer still holds observations, plus one family of arcs per
// possible input label while the end-of-sentence marker has not been seen.
template <class A>
void LinearTaggerFstImpl<A>::Expand(StateId s) {
  VLOG(3) << "Expand " << s;
  state_stub_.clear();
  FillState(s, &state_stub_);
  // Precompute the first `delay_ - 1` elements in the buffer of
  // next states, which are identical for different input/output.
  next_stub_.clear();
  next_stub_.resize(delay_);
  if (delay_ > 0)
    std::copy(BufferBegin(state_stub_) + 1, BufferEnd(state_stub_),
              next_stub_.begin());
  // Epsilon transition for flushing out the next observed input
  if (!IsEmptyBuffer(BufferBegin(state_stub_), BufferEnd(state_stub_)))
    ExpandArcs(s, state_stub_, LinearFstData<A>::kEndOfSentence, &next_stub_);
  // Non-epsilon input when we haven't flushed
  if (delay_ == 0 ||
      *(BufferEnd(state_stub_) - 1) != LinearFstData<A>::kEndOfSentence)
    for (Label ilabel = data_->MinInputLabel();
         ilabel <= data_->MaxInputLabel(); ++ilabel)
      ExpandArcs(s, state_stub_, ilabel, &next_stub_);
  // Mark the state's arcs as fully computed in the cache.
  SetArcs(s);
}
// Appends to `arcs` all outgoing arcs of state `s` whose input label is
// `ilabel`; `ilabel == 0` (epsilon) selects the flush transition.
// Mirrors Expand() but produces only the matching subset, bypassing the
// cache.
template <class A>
void LinearTaggerFstImpl<A>::MatchInput(StateId s, Label ilabel,
                                        std::vector<Arc> *arcs) {
  state_stub_.clear();
  FillState(s, &state_stub_);
  // Precompute the first `delay_ - 1` elements in the buffer of
  // next states, which are identical for different input/output.
  next_stub_.clear();
  next_stub_.resize(delay_);
  if (delay_ > 0)
    std::copy(BufferBegin(state_stub_) + 1, BufferEnd(state_stub_),
              next_stub_.begin());
  if (ilabel == 0) {
    // Epsilon transition for flushing out the next observed input
    if (!IsEmptyBuffer(BufferBegin(state_stub_), BufferEnd(state_stub_)))
      AppendArcs(s, state_stub_, LinearFstData<A>::kEndOfSentence, &next_stub_,
                 arcs);
  } else {
    // Non-epsilon input when we haven't flushed
    if (delay_ == 0 ||
        *(BufferEnd(state_stub_) - 1) != LinearFstData<A>::kEndOfSentence)
      AppendArcs(s, state_stub_, ilabel, &next_stub_, arcs);
  }
}
// Deserializes a LinearTaggerFstImpl from `strm`: header first, then the
// shared data storage. Returns nullptr on any read failure; otherwise the
// caller takes ownership of the returned impl.
template <class A>
inline LinearTaggerFstImpl<A> *LinearTaggerFstImpl<A>::Read(
    std::istream &strm, const FstReadOptions &opts) {  // NOLINT
  std::unique_ptr<LinearTaggerFstImpl<A>> impl(new LinearTaggerFstImpl<A>());
  FstHeader header;
  if (!impl->ReadHeader(strm, opts, kMinFileVersion, &header)) return nullptr;
  impl->data_.reset(LinearFstData<A>::Read(strm));
  if (!impl->data_) return nullptr;
  // The lookahead delay is derived from the data, not stored separately.
  impl->delay_ = impl->data_->MaxFutureSize();
  impl->ReserveStubSpace();
  return impl.release();
}
} // namespace internal
// This class attaches interface to implementation and handles
// reference counting, delegating most methods to ImplToFst.
template <class A>
class LinearTaggerFst : public ImplToFst<internal::LinearTaggerFstImpl<A>> {
 public:
  friend class ArcIterator<LinearTaggerFst<A>>;
  friend class StateIterator<LinearTaggerFst<A>>;
  friend class LinearFstMatcherTpl<LinearTaggerFst<A>>;

  typedef A Arc;
  typedef typename A::Label Label;
  typedef typename A::Weight Weight;
  typedef typename A::StateId StateId;
  typedef DefaultCacheStore<A> Store;
  typedef typename Store::State State;
  using Impl = internal::LinearTaggerFstImpl<A>;

  // Constructs an empty tagger FST.
  LinearTaggerFst() : ImplToFst<Impl>(std::make_shared<Impl>()) {}

  // Constructs a tagger FST over `data` with optional input/output
  // symbol tables. NOTE(review): ownership semantics of `data` are
  // determined by the Impl constructor (not visible here) — verify
  // against LinearTaggerFstImpl before relying on them.
  explicit LinearTaggerFst(LinearFstData<A> *data,
                           const SymbolTable *isyms = nullptr,
                           const SymbolTable *osyms = nullptr,
                           CacheOptions opts = CacheOptions())
      : ImplToFst<Impl>(std::make_shared<Impl>(data, isyms, osyms, opts)) {}

  // Conversion from an arbitrary FST is unsupported; aborts at runtime.
  explicit LinearTaggerFst(const Fst<A> &fst)
      : ImplToFst<Impl>(std::make_shared<Impl>()) {
    LOG(FATAL) << "LinearTaggerFst: no constructor from arbitrary FST.";
  }

  // See Fst<>::Copy() for doc.
  LinearTaggerFst(const LinearTaggerFst<A> &fst, bool safe = false)
      : ImplToFst<Impl>(fst, safe) {}

  // Get a copy of this LinearTaggerFst. See Fst<>::Copy() for further doc.
  LinearTaggerFst<A> *Copy(bool safe = false) const override {
    return new LinearTaggerFst<A>(*this, safe);
  }

  inline void InitStateIterator(StateIteratorData<A> *data) const override;

  void InitArcIterator(StateId s, ArcIteratorData<A> *data) const override {
    GetMutableImpl()->InitArcIterator(s, data);
  }

  MatcherBase<A> *InitMatcher(MatchType match_type) const override {
    return new LinearFstMatcherTpl<LinearTaggerFst<A>>(this, match_type);
  }

  // Reads a tagger FST from `filename`; empty name means standard input.
  // Returns nullptr on failure.
  static LinearTaggerFst<A> *Read(const string &filename) {
    if (!filename.empty()) {
      std::ifstream strm(filename,
                         std::ios_base::in | std::ios_base::binary);
      if (!strm) {
        LOG(ERROR) << "LinearTaggerFst::Read: Can't open file: " << filename;
        return nullptr;
      }
      return Read(strm, FstReadOptions(filename));
    } else {
      return Read(std::cin, FstReadOptions("standard input"));
    }
  }

  // Reads a tagger FST from an open stream; returns nullptr on failure.
  static LinearTaggerFst<A> *Read(std::istream &in,  // NOLINT
                                  const FstReadOptions &opts) {
    auto *impl = Impl::Read(in, opts);
    return impl ? new LinearTaggerFst<A>(std::shared_ptr<Impl>(impl)) : nullptr;
  }

  // Writes to `filename`; empty name means standard output.
  bool Write(const string &filename) const override {
    if (!filename.empty()) {
      std::ofstream strm(filename,
                         std::ios_base::out | std::ios_base::binary);
      if (!strm) {
        LOG(ERROR) << "LinearTaggerFst::Write: Can't open file: " << filename;
        return false;
      }
      return Write(strm, FstWriteOptions(filename));
    } else {
      return Write(std::cout, FstWriteOptions("standard output"));
    }
  }

  bool Write(std::ostream &strm, const FstWriteOptions &opts) const override {
    return GetImpl()->Write(strm, opts);
  }

 private:
  using ImplToFst<Impl>::GetImpl;
  using ImplToFst<Impl>::GetMutableImpl;

  explicit LinearTaggerFst(std::shared_ptr<Impl> impl)
      : ImplToFst<Impl>(impl) {}

  void operator=(const LinearTaggerFst<A> &fst) = delete;
};
// Specialization for LinearTaggerFst: iterates over the states cached
// (and lazily expanded) by the underlying impl.
template <class Arc>
class StateIterator<LinearTaggerFst<Arc>>
    : public CacheStateIterator<LinearTaggerFst<Arc>> {
 public:
  using Base = CacheStateIterator<LinearTaggerFst<Arc>>;

  explicit StateIterator(const LinearTaggerFst<Arc> &fst)
      : Base(fst, fst.GetMutableImpl()) {}
};
// Specialization for LinearTaggerFst: forces expansion of state `s`
// on construction so every arc is available from the cache.
template <class Arc>
class ArcIterator<LinearTaggerFst<Arc>>
    : public CacheArcIterator<LinearTaggerFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;
  using Base = CacheArcIterator<LinearTaggerFst<Arc>>;

  ArcIterator(const LinearTaggerFst<Arc> &fst, StateId s)
      : Base(fst.GetMutableImpl(), s) {
    if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s);
  }
};
// Provides the base Fst interface with this class's specialized state
// iterator; the caller (StateIterator<Fst<Arc>>) takes ownership of `base`.
template <class Arc>
inline void LinearTaggerFst<Arc>::InitStateIterator(
    StateIteratorData<Arc> *data) const {
  data->base = new StateIterator<LinearTaggerFst<Arc>>(*this);
}
namespace internal {
// Implementation class for on-the-fly generated LinearClassifierFst with
// special optimization in matching.
//
// A state tuple is [prediction] [internal] (see the private accessors
// below); states are expanded lazily into the inherited arc cache.
template <class A>
class LinearClassifierFstImpl : public CacheImpl<A> {
 public:
  using FstImpl<A>::SetType;
  using FstImpl<A>::SetProperties;
  using FstImpl<A>::SetInputSymbols;
  using FstImpl<A>::SetOutputSymbols;
  using FstImpl<A>::WriteHeader;

  using CacheBaseImpl<CacheState<A>>::PushArc;
  using CacheBaseImpl<CacheState<A>>::HasArcs;
  using CacheBaseImpl<CacheState<A>>::HasFinal;
  using CacheBaseImpl<CacheState<A>>::HasStart;
  using CacheBaseImpl<CacheState<A>>::SetArcs;
  using CacheBaseImpl<CacheState<A>>::SetFinal;
  using CacheBaseImpl<CacheState<A>>::SetStart;

  typedef A Arc;
  typedef typename A::Label Label;
  typedef typename A::Weight Weight;
  typedef typename A::StateId StateId;
  typedef typename Collection<StateId, Label>::SetIterator NGramIterator;

  // Constructs an empty FST by default.
  LinearClassifierFstImpl()
      : CacheImpl<A>(CacheOptions()),
        data_(std::make_shared<LinearFstData<A>>()) {
    SetType("linear-classifier");
    num_classes_ = 0;
    num_groups_ = 0;
  }

  // Constructs the FST with given data storage, number of classes and
  // symbol tables.
  LinearClassifierFstImpl(const LinearFstData<Arc> *data, size_t num_classes,
                          const SymbolTable *isyms, const SymbolTable *osyms,
                          CacheOptions opts)
      : CacheImpl<A>(opts),
        data_(data),
        num_classes_(num_classes),
        num_groups_(data_->NumGroups() / num_classes_) {
    SetType("linear-classifier");
    SetProperties(kILabelSorted, kFstProperties);
    SetInputSymbols(isyms);
    SetOutputSymbols(osyms);
    ReserveStubSpace();
  }

  // Copy by sharing the underlying data storage.
  LinearClassifierFstImpl(const LinearClassifierFstImpl &impl)
      : CacheImpl<A>(impl),
        data_(impl.data_),
        num_classes_(impl.num_classes_),
        num_groups_(impl.num_groups_) {
    SetType("linear-classifier");
    SetProperties(impl.Properties(), kCopyProperties);
    SetInputSymbols(impl.InputSymbols());
    SetOutputSymbols(impl.OutputSymbols());
    ReserveStubSpace();
  }

  // Lazily computes and caches the start state id.
  StateId Start() {
    if (!HasStart()) {
      StateId start = FindStartState();
      SetStart(start);
    }
    return CacheImpl<A>::Start();
  }

  // Lazily computes and caches the final weight of `s`.
  Weight Final(StateId s) {
    if (!HasFinal(s)) {
      state_stub_.clear();
      FillState(s, &state_stub_);
      SetFinal(s, FinalWeight(state_stub_));
    }
    return CacheImpl<A>::Final(s);
  }

  // The Num* accessors expand `s` on first use, then answer from cache.
  size_t NumArcs(StateId s) {
    if (!HasArcs(s)) Expand(s);
    return CacheImpl<A>::NumArcs(s);
  }

  size_t NumInputEpsilons(StateId s) {
    if (!HasArcs(s)) Expand(s);
    return CacheImpl<A>::NumInputEpsilons(s);
  }

  size_t NumOutputEpsilons(StateId s) {
    if (!HasArcs(s)) Expand(s);
    return CacheImpl<A>::NumOutputEpsilons(s);
  }

  void InitArcIterator(StateId s, ArcIteratorData<A> *data) {
    if (!HasArcs(s)) Expand(s);
    CacheImpl<A>::InitArcIterator(s, data);
  }

  // Computes the outgoing transitions from a state, creating new
  // destination states as needed.
  void Expand(StateId s);

  // Appends to `arcs` all out-going arcs from state `s` that matches
  // `label` as the input label.
  void MatchInput(StateId s, Label ilabel, std::vector<Arc> *arcs);

  static LinearClassifierFstImpl<A> *Read(std::istream &strm,
                                          const FstReadOptions &opts);

  // Serializes header, data storage, and the number of classes, in that
  // order (must mirror Read()).
  bool Write(std::ostream &strm, const FstWriteOptions &opts) const {
    FstHeader header;
    header.SetStart(kNoStateId);
    WriteHeader(strm, opts, kFileVersion, &header);
    data_->Write(strm);
    WriteType(strm, num_classes_);
    if (!strm) {
      LOG(ERROR) << "LinearClassifierFst::Write: Write failed: " << opts.source;
      return false;
    }
    return true;
  }

 private:
  static const int kMinFileVersion;
  static const int kFileVersion;

  // A collection of functions to access parts of the state tuple. A
  // state tuple is a vector of `Label`s with two parts:
  // [prediction] [internal].
  //
  // - [prediction] is a single label of the predicted class. A state
  // must have a positive class label, unless it is the start state.
  //
  // - [internal] is the internal state tuple for `LinearFstData` of
  // the given class; or kNoTrieNodeId's if in start state.
  Label &Prediction(std::vector<Label> &state) { return state[0]; }  // NOLINT
  Label Prediction(const std::vector<Label> &state) const { return state[0]; }

  Label &InternalAt(std::vector<Label> &state, int index) {  // NOLINT
    return state[index + 1];
  }
  Label InternalAt(const std::vector<Label> &state, int index) const {
    return state[index + 1];
  }

  // The size of state tuples are fixed, reserve them in stubs
  void ReserveStubSpace() {
    size_t size = 1 + num_groups_;
    state_stub_.reserve(size);
    next_stub_.reserve(size);
  }

  // Computes the start state tuple and maps it to the start state id.
  StateId FindStartState() {
    // A start state tuple has no prediction
    state_stub_.clear();
    state_stub_.push_back(kNoLabel);
    // For a start state, we don't yet know where we are in the tries.
    for (size_t i = 0; i < num_groups_; ++i)
      state_stub_.push_back(kNoTrieNodeId);
    return FindState(state_stub_);
  }

  // Tests if the state tuple represents the start state.
  bool IsStartState(const std::vector<Label> &state) const {
    return state[0] == kNoLabel;
  }

  // Computes the actual group id in the data storage.
  int GroupId(Label pred, int group) const {
    return group * num_classes_ + pred - 1;
  }

  // Finds out the final weight of the given state. A state is final
  // iff it is not the start.
  Weight FinalWeight(const std::vector<Label> &state) const {
    if (IsStartState(state)) {
      return Weight::Zero();
    }
    Label pred = Prediction(state);
    DCHECK_GT(pred, 0);
    DCHECK_LE(pred, num_classes_);
    Weight final_weight = Weight::One();
    for (size_t group = 0; group < num_groups_; ++group) {
      int group_id = GroupId(pred, group);
      int trie_state = InternalAt(state, group);
      final_weight =
          Times(final_weight, data_->GroupFinalWeight(group_id, trie_state));
    }
    return final_weight;
  }

  // Finds state corresponding to an n-gram. Creates new state if n-gram not
  // found.
  StateId FindState(const std::vector<Label> &ngram) {
    StateId sparse = ngrams_.FindId(ngram, true);
    StateId dense = condensed_.FindId(sparse, true);
    return dense;
  }

  // Appends after `output` the state tuple corresponding to the state id. The
  // state id must exist.
  void FillState(StateId s, std::vector<Label> *output) {
    s = condensed_.FindEntry(s);
    for (NGramIterator it = ngrams_.FindSet(s); !it.Done(); it.Next()) {
      Label label = it.Element();
      output->push_back(label);
    }
  }

  std::shared_ptr<const LinearFstData<A>> data_;
  // Division of groups in `data_`; num_classes_ * num_groups_ ==
  // data_->NumGroups().
  size_t num_classes_, num_groups_;
  // Mapping from internal state tuple to *non-consecutive* ids
  Collection<StateId, Label> ngrams_;
  // Mapping from non-consecutive id to actual state id
  CompactHashBiTable<StateId, StateId, std::hash<StateId>> condensed_;
  // Two frequently used vectors, reuse to avoid repeated heap
  // allocation
  std::vector<Label> state_stub_, next_stub_;

  void operator=(const LinearClassifierFstImpl<A> &) = delete;
};
// On-disk format version numbers for (de)serialization; both are 0 as
// only one format has ever been written.
template <class A>
const int LinearClassifierFstImpl<A>::kMinFileVersion = 0;

template <class A>
const int LinearClassifierFstImpl<A>::kFileVersion = 0;
// Computes and caches all outgoing arcs of `s`. From the start state,
// one epsilon-input arc per class makes the prediction; from any other
// state, one arc per input label advances every group's trie under the
// already-made prediction.
template <class A>
void LinearClassifierFstImpl<A>::Expand(StateId s) {
  VLOG(3) << "Expand " << s;
  state_stub_.clear();
  FillState(s, &state_stub_);
  next_stub_.clear();
  next_stub_.resize(1 + num_groups_);

  if (IsStartState(state_stub_)) {
    // Make prediction
    for (Label pred = 1; pred <= num_classes_; ++pred) {
      Prediction(next_stub_) = pred;
      for (int i = 0; i < num_groups_; ++i)
        InternalAt(next_stub_, i) = data_->GroupStartState(GroupId(pred, i));

      PushArc(s, A(0, pred, Weight::One(), FindState(next_stub_)));
    }
  } else {
    Label pred = Prediction(state_stub_);
    DCHECK_GT(pred, 0);
    DCHECK_LE(pred, num_classes_);
    for (Label ilabel = data_->MinInputLabel();
         ilabel <= data_->MaxInputLabel(); ++ilabel) {
      Prediction(next_stub_) = pred;
      // `weight` accumulates the feature weights fired by this transition
      // across all groups.
      Weight weight = Weight::One();
      for (int i = 0; i < num_groups_; ++i)
        InternalAt(next_stub_, i) =
            data_->GroupTransition(GroupId(pred, i), InternalAt(state_stub_, i),
                                   ilabel, pred, &weight);

      PushArc(s, A(ilabel, 0, weight, FindState(next_stub_)));
    }
  }

  SetArcs(s);
}
// Appends to `arcs` only the outgoing arcs of `s` that match `ilabel`
// on the input side; the cache is bypassed. Mirrors Expand(): epsilon
// matches only the start state's prediction arcs, non-epsilon matches
// only non-start transitions.
template <class A>
void LinearClassifierFstImpl<A>::MatchInput(StateId s, Label ilabel,
                                            std::vector<Arc> *arcs) {
  state_stub_.clear();
  FillState(s, &state_stub_);
  next_stub_.clear();
  next_stub_.resize(1 + num_groups_);

  if (IsStartState(state_stub_)) {
    // Make prediction if `ilabel` is epsilon.
    if (ilabel == 0) {
      for (Label pred = 1; pred <= num_classes_; ++pred) {
        Prediction(next_stub_) = pred;
        for (int i = 0; i < num_groups_; ++i)
          InternalAt(next_stub_, i) = data_->GroupStartState(GroupId(pred, i));

        arcs->push_back(A(0, pred, Weight::One(), FindState(next_stub_)));
      }
    }
  } else if (ilabel != 0) {
    Label pred = Prediction(state_stub_);
    Weight weight = Weight::One();
    Prediction(next_stub_) = pred;
    for (int i = 0; i < num_groups_; ++i)
      InternalAt(next_stub_, i) = data_->GroupTransition(
          GroupId(pred, i), InternalAt(state_stub_, i), ilabel, pred, &weight);

    arcs->push_back(A(ilabel, 0, weight, FindState(next_stub_)));
  }
}
// Deserializes a LinearClassifierFstImpl from `strm`.
//
// Reads, in order (mirroring Write()): the FST header, the shared
// feature-group data storage, and the number of classes. Returns a newly
// allocated impl (ownership passes to the caller) or nullptr on any read
// failure or inconsistency in the data.
template <class A>
inline LinearClassifierFstImpl<A> *LinearClassifierFstImpl<A>::Read(
    std::istream &strm, const FstReadOptions &opts) {
  std::unique_ptr<LinearClassifierFstImpl<A>> impl(
      new LinearClassifierFstImpl<A>());
  FstHeader header;
  if (!impl->ReadHeader(strm, opts, kMinFileVersion, &header)) {
    return nullptr;
  }
  impl->data_ = std::shared_ptr<LinearFstData<A>>(LinearFstData<A>::Read(strm));
  if (!impl->data_) {
    return nullptr;
  }
  ReadType(strm, &impl->num_classes_);
  if (!strm) {
    return nullptr;
  }
  // A corrupted or truncated file could yield zero classes; the division
  // below would then be undefined behavior (integer division by zero), so
  // reject such input explicitly.
  if (impl->num_classes_ == 0) {
    FSTERROR() << "LinearClassifierFstImpl::Read: Number of classes must be "
                  "positive";
    return nullptr;
  }
  impl->num_groups_ = impl->data_->NumGroups() / impl->num_classes_;
  if (impl->num_groups_ * impl->num_classes_ != impl->data_->NumGroups()) {
    FSTERROR() << "Total number of feature groups is not a multiple of the "
                  "number of classes: num groups = "
               << impl->data_->NumGroups()
               << ", num classes = " << impl->num_classes_;
    return nullptr;
  }
  impl->ReserveStubSpace();
  return impl.release();
}
} // namespace internal
// This class attaches interface to implementation and handles
// reference counting, delegating most methods to ImplToFst.
template <class A>
class LinearClassifierFst
    : public ImplToFst<internal::LinearClassifierFstImpl<A>> {
 public:
  friend class ArcIterator<LinearClassifierFst<A>>;
  friend class StateIterator<LinearClassifierFst<A>>;
  friend class LinearFstMatcherTpl<LinearClassifierFst<A>>;

  typedef A Arc;
  typedef typename A::Label Label;
  typedef typename A::Weight Weight;
  typedef typename A::StateId StateId;
  typedef DefaultCacheStore<A> Store;
  typedef typename Store::State State;
  using Impl = internal::LinearClassifierFstImpl<A>;

  // Constructs an empty classifier FST.
  LinearClassifierFst() : ImplToFst<Impl>(std::make_shared<Impl>()) {}

  // Constructs a classifier FST over `data` with `num_classes` output
  // classes and optional input/output symbol tables.
  explicit LinearClassifierFst(LinearFstData<A> *data, size_t num_classes,
                               const SymbolTable *isyms = nullptr,
                               const SymbolTable *osyms = nullptr,
                               CacheOptions opts = CacheOptions())
      : ImplToFst<Impl>(
            std::make_shared<Impl>(data, num_classes, isyms, osyms, opts)) {}

  // Conversion from an arbitrary FST is unsupported; aborts at runtime.
  explicit LinearClassifierFst(const Fst<A> &fst)
      : ImplToFst<Impl>(std::make_shared<Impl>()) {
    LOG(FATAL) << "LinearClassifierFst: no constructor from arbitrary FST.";
  }

  // See Fst<>::Copy() for doc.
  LinearClassifierFst(const LinearClassifierFst<A> &fst, bool safe = false)
      : ImplToFst<Impl>(fst, safe) {}

  // Get a copy of this LinearClassifierFst. See Fst<>::Copy() for further doc.
  LinearClassifierFst<A> *Copy(bool safe = false) const override {
    return new LinearClassifierFst<A>(*this, safe);
  }

  inline void InitStateIterator(StateIteratorData<A> *data) const override;

  void InitArcIterator(StateId s, ArcIteratorData<A> *data) const override {
    GetMutableImpl()->InitArcIterator(s, data);
  }

  MatcherBase<A> *InitMatcher(MatchType match_type) const override {
    return new LinearFstMatcherTpl<LinearClassifierFst<A>>(this, match_type);
  }

  // Reads a classifier FST from `filename`; empty name means standard
  // input. Returns nullptr on failure.
  static LinearClassifierFst<A> *Read(const string &filename) {
    if (!filename.empty()) {
      std::ifstream strm(filename,
                         std::ios_base::in | std::ios_base::binary);
      if (!strm) {
        LOG(ERROR) << "LinearClassifierFst::Read: Can't open file: "
                   << filename;
        return nullptr;
      }
      return Read(strm, FstReadOptions(filename));
    } else {
      return Read(std::cin, FstReadOptions("standard input"));
    }
  }

  // Reads a classifier FST from an open stream; returns nullptr on failure.
  static LinearClassifierFst<A> *Read(std::istream &in,
                                      const FstReadOptions &opts) {
    auto *impl = Impl::Read(in, opts);
    return impl ? new LinearClassifierFst<A>(std::shared_ptr<Impl>(impl))
                : nullptr;
  }

  // Writes to `filename`; empty name means standard output.
  bool Write(const string &filename) const override {
    if (!filename.empty()) {
      std::ofstream strm(filename,
                         std::ios_base::out | std::ios_base::binary);
      if (!strm) {
        // Fixed: previously logged as "ProdLmFst::Write", a copy-paste
        // leftover from another class.
        LOG(ERROR) << "LinearClassifierFst::Write: Can't open file: "
                   << filename;
        return false;
      }
      return Write(strm, FstWriteOptions(filename));
    } else {
      return Write(std::cout, FstWriteOptions("standard output"));
    }
  }

  bool Write(std::ostream &strm, const FstWriteOptions &opts) const override {
    return GetImpl()->Write(strm, opts);
  }

 private:
  using ImplToFst<Impl>::GetImpl;
  using ImplToFst<Impl>::GetMutableImpl;

  explicit LinearClassifierFst(std::shared_ptr<Impl> impl)
      : ImplToFst<Impl>(impl) {}

  void operator=(const LinearClassifierFst<A> &fst) = delete;
};
// Specialization for LinearClassifierFst: iterates over the states
// cached (and lazily expanded) by the underlying impl.
template <class Arc>
class StateIterator<LinearClassifierFst<Arc>>
    : public CacheStateIterator<LinearClassifierFst<Arc>> {
 public:
  using Base = CacheStateIterator<LinearClassifierFst<Arc>>;

  explicit StateIterator(const LinearClassifierFst<Arc> &fst)
      : Base(fst, fst.GetMutableImpl()) {}
};
// Specialization for LinearClassifierFst: forces expansion of state `s`
// on construction so every arc is available from the cache.
template <class Arc>
class ArcIterator<LinearClassifierFst<Arc>>
    : public CacheArcIterator<LinearClassifierFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;
  using Base = CacheArcIterator<LinearClassifierFst<Arc>>;

  ArcIterator(const LinearClassifierFst<Arc> &fst, StateId s)
      : Base(fst.GetMutableImpl(), s) {
    if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s);
  }
};
// Provides the base Fst interface with this class's specialized state
// iterator; the caller (StateIterator<Fst<Arc>>) takes ownership of `base`.
template <class Arc>
inline void LinearClassifierFst<Arc>::InitStateIterator(
    StateIteratorData<Arc> *data) const {
  data->base = new StateIterator<LinearClassifierFst<Arc>>(*this);
}
// Specialized Matcher for LinearFsts. This matcher only supports
// matching from the input side. This is intentional because comparing
// the scores of different input sequences with the same output
// sequence is meaningless in a discriminative model.
template <class F>
class LinearFstMatcherTpl : public MatcherBase<typename F::Arc> {
 public:
  typedef typename F::Arc Arc;
  typedef typename Arc::Label Label;
  typedef typename Arc::Weight Weight;
  typedef typename Arc::StateId StateId;
  typedef F FST;

  // This makes a copy of the FST.
  LinearFstMatcherTpl(const FST &fst, MatchType match_type)
      : owned_fst_(fst.Copy()),
        fst_(*owned_fst_),
        match_type_(match_type),
        s_(kNoStateId),
        current_loop_(false),
        loop_(kNoLabel, 0, Weight::One(), kNoStateId),
        cur_arc_(0),
        error_(false) {
    switch (match_type_) {
      case MATCH_INPUT:
      case MATCH_OUTPUT:
      case MATCH_NONE:
        break;
      default:
        FSTERROR() << "LinearFstMatcherTpl: Bad match type";
        match_type_ = MATCH_NONE;
        error_ = true;
    }
  }

  // This doesn't copy the FST.
  LinearFstMatcherTpl(const FST *fst, MatchType match_type)
      : fst_(*fst),
        match_type_(match_type),
        s_(kNoStateId),
        current_loop_(false),
        loop_(kNoLabel, 0, Weight::One(), kNoStateId),
        cur_arc_(0),
        error_(false) {
    switch (match_type_) {
      case MATCH_INPUT:
      case MATCH_OUTPUT:
      case MATCH_NONE:
        break;
      default:
        FSTERROR() << "LinearFstMatcherTpl: Bad match type";
        match_type_ = MATCH_NONE;
        error_ = true;
    }
  }

  // This makes a copy of the FST.
  LinearFstMatcherTpl(const LinearFstMatcherTpl<F> &matcher, bool safe = false)
      : owned_fst_(matcher.fst_.Copy(safe)),
        fst_(*owned_fst_),
        match_type_(matcher.match_type_),
        s_(kNoStateId),
        current_loop_(false),
        loop_(matcher.loop_),
        cur_arc_(0),
        error_(matcher.error_) {}

  LinearFstMatcherTpl<F> *Copy(bool safe = false) const override {
    return new LinearFstMatcherTpl<F>(*this, safe);
  }

  MatchType Type(bool /*test*/) const override {
    // `MATCH_INPUT` is the only valid type
    return match_type_ == MATCH_INPUT ? match_type_ : MATCH_NONE;
  }

  // Positions the matcher at state `s`; subsequent Find() calls match
  // against this state's arcs.
  void SetState(StateId s) final {
    if (s_ == s) return;
    s_ = s;
    // `MATCH_INPUT` is the only valid type
    if (match_type_ != MATCH_INPUT) {
      FSTERROR() << "LinearFstMatcherTpl: Bad match type";
      error_ = true;
    }
    loop_.nextstate = s;
  }

  // Finds arcs matching `label` at the current state. Label 0 also
  // enables the implicit epsilon self-loop; kNoLabel is treated as 0.
  // Returns true iff at least one match (including the loop) exists.
  bool Find(Label label) final {
    if (error_) {
      current_loop_ = false;
      return false;
    }
    current_loop_ = label == 0;
    if (label == kNoLabel) label = 0;
    arcs_.clear();
    cur_arc_ = 0;
    fst_.GetMutableImpl()->MatchInput(s_, label, &arcs_);
    return current_loop_ || !arcs_.empty();
  }

  bool Done() const final {
    return !(current_loop_ || cur_arc_ < arcs_.size());
  }

  // Returns the current match: the implicit loop first (if active), then
  // the collected arcs in order.
  const Arc &Value() const final {
    return current_loop_ ? loop_ : arcs_[cur_arc_];
  }

  void Next() final {
    if (current_loop_)
      current_loop_ = false;
    else
      ++cur_arc_;
  }

  std::ptrdiff_t Priority(StateId s) final { return kRequirePriority; }

  const FST &GetFst() const override { return fst_; }

  uint64_t Properties(uint64_t props) const override {
    if (error_) props |= kError;
    return props;
  }

  uint32_t Flags() const override { return kRequireMatch; }

 private:
  std::unique_ptr<const FST> owned_fst_;
  const FST &fst_;
  MatchType match_type_;  // Type of match to perform.
  StateId s_;             // Current state.
  bool current_loop_;     // Current arc is the implicit loop.
  Arc loop_;              // For non-consuming symbols.
  // All out-going arcs matching the label in last Find() call.
  std::vector<Arc> arcs_;
  size_t cur_arc_;  // Index to the arc that `Value()` should return.
  bool error_;      // Error encountered.
};
} // namespace fst
#endif // FST_EXTENSIONS_LINEAR_LINEAR_FST_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/edit-fst.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// An FST implementation that allows non-destructive edit operations on an
// existing FST.
//
// The EditFst class enables non-destructive edit operations on a wrapped
// ExpandedFst. The implementation uses copy-on-write semantics at the node
// level: if a user has an underlying fst on which he or she wants to perform a
// relatively small number of edits (read: mutations), then this implementation
// will copy the edited node to an internal MutableFst and perform any edits in
// situ on that copied node. This class supports all the methods of MutableFst
// except for DeleteStates(const std::vector<StateId> &); thus, new nodes may
// also be
// added, and one may add transitions from existing nodes of the wrapped fst to
// new nodes.
//
// N.B.: The documentation for Fst::Copy(true) says that its behavior is
// undefined if invoked on an fst that has already been accessed. This class
// requires that the Fst implementation it wraps provides consistent, reliable
// behavior when its Copy(true) method is invoked, where consistent means
// the graph structure, graph properties and state numbering and do not change.
// VectorFst and CompactFst, for example, are both well-behaved in this regard.
#ifndef FST_EDIT_FST_H_
#define FST_EDIT_FST_H_
#include <string>
#include <unordered_map>
#include <vector>
#include <fst/log.h>
#include <fst/cache.h>
namespace fst {
namespace internal {
// The EditFstData class is a container for all mutable data for EditFstImpl;
// also, this class provides most of the actual implementation of what EditFst
// does (that is, most of EditFstImpl's methods delegate to methods in this, the
// EditFstData class). Instances of this class are reference-counted and can be
// shared between otherwise independent EditFstImpl instances. This scheme
// allows EditFstImpl to implement the thread-safe, copy-on-write semantics
// required by Fst::Copy(true).
//
// template parameters:
// A the type of arc to use
// WrappedFstT the type of fst wrapped by the EditFst instance that
// this EditFstData instance is backing
// MutableFstT the type of mutable fst to use internally for edited states;
// crucially, MutableFstT::Copy(false) *must* yield an fst that is
// thread-safe for reading (VectorFst, for example, has this property)
template <typename Arc, typename WrappedFstT = ExpandedFst<Arc>,
typename MutableFstT = VectorFst<Arc>>
class EditFstData {
public:
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
EditFstData() : num_new_states_(0) {}
EditFstData(const EditFstData &other)
: edits_(other.edits_),
external_to_internal_ids_(other.external_to_internal_ids_),
edited_final_weights_(other.edited_final_weights_),
num_new_states_(other.num_new_states_) {}
~EditFstData() {}
static EditFstData<Arc, WrappedFstT, MutableFstT> *Read(
std::istream &strm, const FstReadOptions &opts);
bool Write(std::ostream &strm, const FstWriteOptions &opts) const {
// Serialize all private data members of this class.
FstWriteOptions edits_opts(opts);
edits_opts.write_header = true; // Force writing contained header.
edits_.Write(strm, edits_opts);
WriteType(strm, external_to_internal_ids_);
WriteType(strm, edited_final_weights_);
WriteType(strm, num_new_states_);
if (!strm) {
LOG(ERROR) << "EditFstData::Write: Write failed: " << opts.source;
return false;
}
return true;
}
StateId NumNewStates() const { return num_new_states_; }
// accessor methods for the fst holding edited states
StateId EditedStart() const { return edits_.Start(); }
Weight Final(StateId s, const WrappedFstT *wrapped) const {
auto final_weight_it = GetFinalWeightIterator(s);
if (final_weight_it == NotInFinalWeightMap()) {
auto it = GetEditedIdMapIterator(s);
return it == NotInEditedMap() ? wrapped->Final(s)
: edits_.Final(it->second);
} else {
return final_weight_it->second;
}
}
size_t NumArcs(StateId s, const WrappedFstT *wrapped) const {
auto it = GetEditedIdMapIterator(s);
return it == NotInEditedMap() ? wrapped->NumArcs(s)
: edits_.NumArcs(it->second);
}
size_t NumInputEpsilons(StateId s, const WrappedFstT *wrapped) const {
auto it = GetEditedIdMapIterator(s);
return it == NotInEditedMap() ? wrapped->NumInputEpsilons(s)
: edits_.NumInputEpsilons(it->second);
}
size_t NumOutputEpsilons(StateId s, const WrappedFstT *wrapped) const {
auto it = GetEditedIdMapIterator(s);
return it == NotInEditedMap() ? wrapped->NumOutputEpsilons(s)
: edits_.NumOutputEpsilons(it->second);
}
void SetEditedProperties(uint64_t props, uint64_t mask) {
edits_.SetProperties(props, mask);
}
// Non-const MutableFst operations.
// Sets the start state for this FST.
void SetStart(StateId s) { edits_.SetStart(s); }
// Sets the final state for this FST.
Weight SetFinal(StateId s, Weight w, const WrappedFstT *wrapped) {
Weight old_weight = Final(s, wrapped);
auto it = GetEditedIdMapIterator(s);
// If we haven't already edited state s, don't add it to edited_ (which can
// be expensive if s has many transitions); just use the
// edited_final_weights_ map.
if (it == NotInEditedMap()) {
edited_final_weights_[s] = w;
} else {
edits_.SetFinal(GetEditableInternalId(s, wrapped), w);
}
return old_weight;
}
// Adds a new state to this FST, initially with no arcs.
StateId AddState(StateId curr_num_states) {
StateId internal_state_id = edits_.AddState();
StateId external_state_id = curr_num_states;
external_to_internal_ids_[external_state_id] = internal_state_id;
num_new_states_++;
return external_state_id;
}
// Adds the specified arc to the specified state of this FST.
const Arc *AddArc(StateId s, const Arc &arc, const WrappedFstT *wrapped) {
const auto internal_id = GetEditableInternalId(s, wrapped);
const auto num_arcs = edits_.NumArcs(internal_id);
ArcIterator<MutableFstT> arc_it(edits_, internal_id);
const Arc *prev_arc = nullptr;
if (num_arcs > 0) {
// grab the final arc associated with this state in edits_
arc_it.Seek(num_arcs - 1);
prev_arc = &(arc_it.Value());
}
edits_.AddArc(internal_id, arc);
return prev_arc;
}
void DeleteStates() {
edits_.DeleteStates();
num_new_states_ = 0;
external_to_internal_ids_.clear();
edited_final_weights_.clear();
}
// Removes all but the first n outgoing arcs of the specified state.
void DeleteArcs(StateId s, size_t n, const WrappedFstT *wrapped) {
edits_.DeleteArcs(GetEditableInternalId(s, wrapped), n);
}
// Removes all outgoing arcs from the specified state.
void DeleteArcs(StateId s, const WrappedFstT *wrapped) {
edits_.DeleteArcs(GetEditableInternalId(s, wrapped));
}
// End methods for non-const MutableFst operations.
// Provides information for the generic arc iterator.
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data,
const WrappedFstT *wrapped) const {
auto id_map_it = GetEditedIdMapIterator(s);
if (id_map_it == NotInEditedMap()) {
VLOG(3) << "EditFstData::InitArcIterator: iterating on state " << s
<< " of original fst";
wrapped->InitArcIterator(s, data);
} else {
VLOG(2) << "EditFstData::InitArcIterator: iterating on edited state " << s
<< " (internal state id: " << id_map_it->second << ")";
edits_.InitArcIterator(id_map_it->second, data);
}
}
// Provides information for the generic mutable arc iterator.
void InitMutableArcIterator(StateId s, MutableArcIteratorData<Arc> *data,
const WrappedFstT *wrapped) {
data->base = new MutableArcIterator<MutableFstT>(
&edits_, GetEditableInternalId(s, wrapped));
}
// Prints out the map from external to internal state id's (for debugging
// purposes).
void PrintMap() {
for (auto map_it = external_to_internal_ids_.begin();
map_it != NotInEditedMap(); ++map_it) {
LOG(INFO) << "(external,internal)=(" << map_it->first << ","
<< map_it->second << ")";
}
}
private:
// Returns the iterator of the map from external to internal state id's
// of edits_ for the specified external state id.
typename std::unordered_map<StateId, StateId>::const_iterator
GetEditedIdMapIterator(StateId s) const {
return external_to_internal_ids_.find(s);
}
typename std::unordered_map<StateId, StateId>::const_iterator
NotInEditedMap() const {
return external_to_internal_ids_.end();
}
typename std::unordered_map<StateId, Weight>::const_iterator
GetFinalWeightIterator(StateId s) const {
return edited_final_weights_.find(s);
}
typename std::unordered_map<StateId, Weight>::const_iterator
NotInFinalWeightMap() const {
return edited_final_weights_.end();
}
  // Returns the internal state ID of the specified external ID if the state has
  // already been made editable, or else copies the state from wrapped_ to
  // edits_ and returns the state id of the newly editable state in edits_.
  StateId GetEditableInternalId(StateId s, const WrappedFstT *wrapped) {
    auto id_map_it = GetEditedIdMapIterator(s);
    if (id_map_it == NotInEditedMap()) {
      // First edit of this state: copy its arcs and final weight into the
      // internal mutable FST and record the external-to-internal mapping.
      StateId new_internal_id = edits_.AddState();
      VLOG(2) << "EditFstData::GetEditableInternalId: editing state " << s
              << " of original fst; new internal state id:" << new_internal_id;
      external_to_internal_ids_[s] = new_internal_id;
      for (ArcIterator<Fst<Arc>> arc_iterator(*wrapped, s);
           !arc_iterator.Done(); arc_iterator.Next()) {
        edits_.AddArc(new_internal_id, arc_iterator.Value());
      }
      // Copies the final weight.
      auto final_weight_it = GetFinalWeightIterator(s);
      if (final_weight_it == NotInFinalWeightMap()) {
        edits_.SetFinal(new_internal_id, wrapped->Final(s));
      } else {
        // The final weight was edited before the state itself became editable;
        // install that weight on the copy and drop the map entry, since the
        // copied state is now the single source of truth for s.
        edits_.SetFinal(new_internal_id, final_weight_it->second);
        edited_final_weights_.erase(s);
      }
      return new_internal_id;
    } else {
      return id_map_it->second;
    }
  }
  // A mutable FST (by default, a VectorFst) to contain new states, and/or
  // copies of states from a wrapped ExpandedFst that have been modified in
  // some way.
  MutableFstT edits_;
  // A mapping from external state IDs to the internal IDs of states that
  // appear in edits_. Only states that have been made editable (see
  // GetEditableInternalId) have entries here.
  std::unordered_map<StateId, StateId> external_to_internal_ids_;
  // A mapping from external state IDs to final state weights assigned to
  // those states. The states in this map are *only* those whose final weight
  // has been modified; if any other part of the state has been modified,
  // the entire state is copied to edits_, and all modifications reside there
  // (GetEditableInternalId migrates any pending entry out of this map).
  std::unordered_map<StateId, Weight> edited_final_weights_;
  // The number of new states added to this mutable fst impl, which is <= the
  // number of states in edits_ (since edits_ contains both edited *and* new
  // states).
  StateId num_new_states_;
};
// EditFstData method implementations: just the Read method.
// Deserializes an EditFstData from strm, returning nullptr on failure.
template <typename A, typename WrappedFstT, typename MutableFstT>
EditFstData<A, WrappedFstT, MutableFstT> *
EditFstData<A, WrappedFstT, MutableFstT>::Read(std::istream &strm,
                                               const FstReadOptions &opts) {
  auto *data = new EditFstData<A, WrappedFstT, MutableFstT>();
  // Next, reads in the MutableFstT machine that stores edits.
  FstReadOptions edits_opts(opts);
  // Contained header was written out, so read it in.
  edits_opts.header = nullptr;
  // Because our internal representation of edited states is a solid object
  // of type MutableFstT (defaults to VectorFst<A>) and not a pointer,
  // and because the static Read method allocates a new object on the heap,
  // we need to call Read, check if there was a failure, use
  // MutableFstT::operator= to assign the object (not the pointer) to the
  // edits_ data member (which will increase the ref count by 1 on the impl)
  // and, finally, delete the heap-allocated object.
  std::unique_ptr<MutableFstT> edits(MutableFstT::Read(strm, edits_opts));
  if (!edits) return nullptr;
  data->edits_ = *edits;
  edits.reset();
  // Finally, reads in rest of private data members; the stream state is
  // checked once after all three reads.
  ReadType(strm, &data->external_to_internal_ids_);
  ReadType(strm, &data->edited_final_weights_);
  ReadType(strm, &data->num_new_states_);
  if (!strm) {
    LOG(ERROR) << "EditFst::Read: read failed: " << opts.source;
    return nullptr;
  }
  return data;
}
// This class enables non-destructive edit operations on a wrapped ExpandedFst.
// The implementation uses copy-on-write semantics at the node level: if a user
// has an underlying fst on which he or she wants to perform a relatively small
// number of edits (read: mutations), then this implementation will copy the
// edited node to an internal MutableFst and perform any edits in situ on that
// copied node. This class supports all the methods of MutableFst except for
// DeleteStates(const std::vector<StateId> &); thus, new nodes may also be
// added, and one may add transitions from existing nodes of the wrapped fst to
// new nodes.
//
// template parameters:
//   A the type of arc to use
//   WrappedFstT the type of fst wrapped by the EditFst instance that
//     this EditFstImpl instance is backing
//   MutableFstT the type of mutable fst to use internally for edited states;
//     crucially, MutableFstT::Copy(false) *must* yield an fst that is
//     thread-safe for reading (VectorFst, for example, has this property)
template <typename A, typename WrappedFstT = ExpandedFst<A>,
          typename MutableFstT = VectorFst<A>>
class EditFstImpl : public FstImpl<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using FstImpl<Arc>::SetProperties;
  using FstImpl<Arc>::SetInputSymbols;
  using FstImpl<Arc>::SetOutputSymbols;
  using FstImpl<Arc>::WriteHeader;
  // Constructs an editable FST implementation with no states. Effectively, this
  // initially-empty fst will in every way mimic the behavior of a
  // VectorFst---more precisely, a VectorFstImpl instance---but with slightly
  // slower performance (by a constant factor), due to the fact that
  // this class maintains a mapping between external state id's and
  // their internal equivalents.
  EditFstImpl() : wrapped_(new MutableFstT()) {
    FstImpl<Arc>::SetType("edit");
    InheritPropertiesFromWrapped();
    data_ = std::make_shared<EditFstData<Arc, WrappedFstT, MutableFstT>>();
  }
  // Wraps the specified ExpandedFst. This constructor requires that the
  // specified Fst is an ExpandedFst instance. This requirement is only enforced
  // at runtime. (See below for the reason.)
  //
  // This library uses the pointer-to-implementation or "PIMPL" design pattern.
  // In particular, to make it convenient to bind an implementation class to its
  // interface, there are a pair of template "binder" classes, one for immutable
  // and one for mutable fst's (ImplToFst and ImplToMutableFst, respectively).
  // As it happens, the API for the ImplToMutableFst<I,F> class requires that
  // the implementation class--the template parameter "I"--have a constructor
  // taking a const Fst<A> reference. Accordingly, the constructor here must
  // perform a static_cast to the WrappedFstT type required by EditFst and
  // therefore EditFstImpl.
  explicit EditFstImpl(const Fst<Arc> &wrapped)
      : wrapped_(static_cast<WrappedFstT *>(wrapped.Copy())) {
    FstImpl<Arc>::SetType("edit");
    data_ = std::make_shared<EditFstData<Arc, WrappedFstT, MutableFstT>>();
    // have edits_ inherit all properties from wrapped_
    data_->SetEditedProperties(wrapped_->Properties(kFstProperties, false),
                               kFstProperties);
    InheritPropertiesFromWrapped();
  }
  // A copy constructor for this implementation class, used to implement
  // the Copy() method of the Fst interface. The edit data is shared (and
  // lazily copied on the next mutation via MutateCheck).
  EditFstImpl(const EditFstImpl &impl)
      : FstImpl<Arc>(),
        wrapped_(static_cast<WrappedFstT *>(impl.wrapped_->Copy(true))),
        data_(impl.data_) {
    SetProperties(impl.Properties());
  }
  // const Fst/ExpandedFst operations, declared in the Fst and ExpandedFst
  // interfaces. Each defers to the edit data first and falls back to the
  // wrapped FST for unedited states.
  StateId Start() const {
    const auto edited_start = data_->EditedStart();
    return edited_start == kNoStateId ? wrapped_->Start() : edited_start;
  }
  Weight Final(StateId s) const { return data_->Final(s, wrapped_.get()); }
  size_t NumArcs(StateId s) const { return data_->NumArcs(s, wrapped_.get()); }
  size_t NumInputEpsilons(StateId s) const {
    return data_->NumInputEpsilons(s, wrapped_.get());
  }
  size_t NumOutputEpsilons(StateId s) const {
    return data_->NumOutputEpsilons(s, wrapped_.get());
  }
  StateId NumStates() const {
    return wrapped_->NumStates() + data_->NumNewStates();
  }
  static EditFstImpl<Arc, WrappedFstT, MutableFstT> *Read(
      std::istream &strm, const FstReadOptions &opts);
  // Serializes this FST to strm: header, then the wrapped FST, then the edit
  // data. Returns false on stream failure.
  bool Write(std::ostream &strm, const FstWriteOptions &opts) const {
    FstHeader hdr;
    hdr.SetStart(Start());
    hdr.SetNumStates(NumStates());
    FstWriteOptions header_opts(opts);
    // Allows the contained FST to hold any symbols.
    header_opts.write_isymbols = false;
    header_opts.write_osymbols = false;
    WriteHeader(strm, header_opts, kFileVersion, &hdr);
    // First, serializes the wrapped FST to stream.
    FstWriteOptions wrapped_opts(opts);
    // Forces writing the contained header.
    wrapped_opts.write_header = true;
    wrapped_->Write(strm, wrapped_opts);
    data_->Write(strm, opts);
    strm.flush();
    if (!strm) {
      LOG(ERROR) << "EditFst::Write: Write failed: " << opts.source;
      return false;
    }
    return true;
  }
  // Sets the start state for this FST.
  void SetStart(StateId s) {
    MutateCheck();
    data_->SetStart(s);
    SetProperties(SetStartProperties(FstImpl<Arc>::Properties()));
  }
  // Sets the final weight for the given state of this fst.
  void SetFinal(StateId s, Weight weight) {
    MutateCheck();
    Weight old_weight = data_->SetFinal(s, weight, wrapped_.get());
    SetProperties(
        SetFinalProperties(FstImpl<Arc>::Properties(), old_weight, weight));
  }
  // Adds a new state to this fst, initially with no arcs.
  StateId AddState() {
    MutateCheck();
    SetProperties(AddStateProperties(FstImpl<Arc>::Properties()));
    return data_->AddState(NumStates());
  }
  // Adds the specified arc to the specified state of this fst.
  void AddArc(StateId s, const Arc &arc) {
    MutateCheck();
    const auto *prev_arc = data_->AddArc(s, arc, wrapped_.get());
    SetProperties(
        AddArcProperties(FstImpl<Arc>::Properties(), s, arc, prev_arc));
  }
  // Deliberately unsupported (see the class comment); sets the error property.
  void DeleteStates(const std::vector<StateId> &dstates) {
    FSTERROR() << ": EditFstImpl::DeleteStates(const std::vector<StateId>&): "
               << " not implemented";
    SetProperties(kError, kError);
  }
  // Deletes all states in this fst.
  void DeleteStates();
  // Removes all but the first n outgoing arcs of the specified state.
  void DeleteArcs(StateId s, size_t n) {
    MutateCheck();
    data_->DeleteArcs(s, n, wrapped_.get());
    SetProperties(DeleteArcsProperties(FstImpl<Arc>::Properties()));
  }
  // Removes all outgoing arcs from the specified state.
  void DeleteArcs(StateId s) {
    MutateCheck();
    data_->DeleteArcs(s, wrapped_.get());
    SetProperties(DeleteArcsProperties(FstImpl<Arc>::Properties()));
  }
  // Capacity hints are no-ops for this implementation.
  void ReserveStates(StateId s) {}
  void ReserveArcs(StateId s, size_t n) {}
  // Ends non-const MutableFst operations.
  // Provides information for the generic state iterator.
  void InitStateIterator(StateIteratorData<Arc> *data) const {
    data->base = nullptr;
    data->nstates = NumStates();
  }
  // Provides information for the generic arc iterator.
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const {
    data_->InitArcIterator(s, data, wrapped_.get());
  }
  // Provides information for the generic mutable arc iterator.
  void InitMutableArcIterator(StateId s, MutableArcIteratorData<Arc> *data) {
    MutateCheck();
    data_->InitMutableArcIterator(s, data, wrapped_.get());
  }

 private:
  // Properties always true of this FST class.
  static constexpr uint64_t kStaticProperties = kExpanded | kMutable;
  // Current file format version.
  static constexpr int kFileVersion = 2;
  // Minimum file format version supported.
  static constexpr int kMinFileVersion = 2;
  // Causes this FST to inherit all the properties from its wrapped FST, except
  // for the two properties that always apply to EditFst instances: kExpanded
  // and kMutable.
  void InheritPropertiesFromWrapped() {
    SetProperties(wrapped_->Properties(kCopyProperties, false) |
                  kStaticProperties);
    SetInputSymbols(wrapped_->InputSymbols());
    SetOutputSymbols(wrapped_->OutputSymbols());
  }
  // This method ensures that any operations that alter the mutable data
  // portion of this EditFstImpl cause the data_ member to be copied when its
  // reference count is greater than 1. Note that this method is distinct from
  // MutableFst::Mutate, which gets invoked whenever one of the basic mutation
  // methods defined in MutableFst is invoked, such as SetInputSymbols.
  // The MutateCheck here in EditFstImpl is invoked whenever one of the
  // mutating methods specifically related to the types of edits provided
  // by EditFst is performed, such as changing an arc of an existing state
  // of the wrapped fst via a MutableArcIterator, or adding a new state via
  // AddState().
  void MutateCheck() {
    // Uses use_count() rather than unique(), which is deprecated in C++17
    // and removed in C++20.
    if (data_.use_count() != 1) {
      data_ =
          std::make_shared<EditFstData<Arc, WrappedFstT, MutableFstT>>(*data_);
    }
  }
  // The FST that this FST wraps. The purpose of this class is to enable
  // non-destructive edits on this wrapped FST.
  std::unique_ptr<const WrappedFstT> wrapped_;
  // The mutable data for this EditFst instance, with delegates for all the
  // methods that can mutate data.
  std::shared_ptr<EditFstData<Arc, WrappedFstT, MutableFstT>> data_;
};
// Out-of-class definitions of the static constexpr data members, required
// when they are ODR-used prior to C++17 (where static constexpr members
// became implicitly inline).
template <typename Arc, typename WrappedFstT, typename MutableFstT>
constexpr uint64_t EditFstImpl<Arc, WrappedFstT, MutableFstT>::kStaticProperties;
template <typename Arc, typename WrappedFstT, typename MutableFstT>
constexpr int EditFstImpl<Arc, WrappedFstT, MutableFstT>::kFileVersion;
template <typename Arc, typename WrappedFstT, typename MutableFstT>
constexpr int EditFstImpl<Arc, WrappedFstT, MutableFstT>::kMinFileVersion;
// Deletes all states: clears the edit data and replaces the wrapped FST with
// a fresh empty one, leaving this impl in the same logical state as a
// default-constructed EditFstImpl.
template <typename Arc, typename WrappedFstT, typename MutableFstT>
inline void EditFstImpl<Arc, WrappedFstT, MutableFstT>::DeleteStates() {
  data_->DeleteStates();
  // we are deleting all states, so just forget about pointer to wrapped_
  // and do what default constructor does: set wrapped_ to a new VectorFst
  wrapped_.reset(new MutableFstT());
  const auto new_props =
      DeleteAllStatesProperties(FstImpl<Arc>::Properties(), kStaticProperties);
  FstImpl<Arc>::SetProperties(new_props);
}
// Deserializes an EditFstImpl from strm (header, wrapped FST, edit data),
// returning nullptr on failure.
template <typename Arc, typename WrappedFstT, typename MutableFstT>
EditFstImpl<Arc, WrappedFstT, MutableFstT> *
EditFstImpl<Arc, WrappedFstT, MutableFstT>::Read(std::istream &strm,
                                                 const FstReadOptions &opts) {
  auto *impl = new EditFstImpl();
  FstHeader hdr;
  if (!impl->ReadHeader(strm, opts, kMinFileVersion, &hdr)) return nullptr;
  // NOTE(review): this SetStart mutates the freshly constructed data_, which
  // is replaced below once the serialized edit data are read; presumably the
  // start state is recovered from the deserialized edits -- confirm.
  impl->SetStart(hdr.Start());
  // Reads in wrapped FST.
  FstReadOptions wrapped_opts(opts);
  // Contained header was written out, so reads it in too.
  wrapped_opts.header = nullptr;
  std::unique_ptr<Fst<Arc>> wrapped_fst(Fst<Arc>::Read(strm, wrapped_opts));
  if (!wrapped_fst) return nullptr;
  impl->wrapped_.reset(static_cast<WrappedFstT *>(wrapped_fst.release()));
  impl->data_ = std::shared_ptr<EditFstData<Arc, WrappedFstT, MutableFstT>>(
      EditFstData<Arc, WrappedFstT, MutableFstT>::Read(strm, opts));
  if (!impl->data_) return nullptr;
  return impl;
}
} // namespace internal
// Concrete, editable FST. This class attaches interface to implementation.
// See EditFstImpl above for the copy-on-write editing semantics.
template <typename A, typename WrappedFstT = ExpandedFst<A>,
          typename MutableFstT = VectorFst<A>>
class EditFst : public ImplToMutableFst<
                    internal::EditFstImpl<A, WrappedFstT, MutableFstT>> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Impl = internal::EditFstImpl<Arc, WrappedFstT, MutableFstT>;
  friend class MutableArcIterator<EditFst<Arc, WrappedFstT, MutableFstT>>;
  // Constructs an empty editable FST.
  EditFst() : ImplToMutableFst<Impl>(std::make_shared<Impl>()) {}
  // Wraps a copy of the given FST; fst must actually be a WrappedFstT (this
  // is only enforced at runtime inside EditFstImpl).
  explicit EditFst(const Fst<Arc> &fst)
      : ImplToMutableFst<Impl>(std::make_shared<Impl>(fst)) {}
  explicit EditFst(const WrappedFstT &fst)
      : ImplToMutableFst<Impl>(std::make_shared<Impl>(fst)) {}
  // See Fst<>::Copy() for doc.
  EditFst(const EditFst<Arc, WrappedFstT, MutableFstT> &fst, bool safe = false)
      : ImplToMutableFst<Impl>(fst, safe) {}
  ~EditFst() override {}
  // Gets a copy of this EditFst. See Fst<>::Copy() for further doc.
  EditFst<Arc, WrappedFstT, MutableFstT> *Copy(
      bool safe = false) const override {
    return new EditFst<Arc, WrappedFstT, MutableFstT>(*this, safe);
  }
  EditFst<Arc, WrappedFstT, MutableFstT> &operator=(
      const EditFst<Arc, WrappedFstT, MutableFstT> &fst) {
    SetImpl(fst.GetSharedImpl());
    return *this;
  }
  EditFst<Arc, WrappedFstT, MutableFstT> &operator=(
      const Fst<Arc> &fst) override {
    SetImpl(std::make_shared<Impl>(fst));
    return *this;
  }
  // Reads an EditFst from an input stream, returning nullptr on error.
  static EditFst<Arc, WrappedFstT, MutableFstT> *Read(
      std::istream &strm, const FstReadOptions &opts) {
    auto *impl = Impl::Read(strm, opts);
    // Constructs with the full template-argument list; using EditFst<Arc>
    // here would mismatch Impl (and the declared return type) whenever
    // non-default WrappedFstT/MutableFstT arguments are in play.
    return impl ? new EditFst<Arc, WrappedFstT, MutableFstT>(
                      std::shared_ptr<Impl>(impl))
                : nullptr;
  }
  // Reads an EditFst from a file, returning nullptr on error. If the filename
  // argument is an empty string, it reads from standard input.
  static EditFst<Arc, WrappedFstT, MutableFstT> *Read(const string &filename) {
    auto *impl = ImplToExpandedFst<Impl, MutableFst<Arc>>::Read(filename);
    return impl ? new EditFst<Arc, WrappedFstT, MutableFstT>(
                      std::shared_ptr<Impl>(impl))
                : nullptr;
  }
  bool Write(std::ostream &strm, const FstWriteOptions &opts) const override {
    return GetImpl()->Write(strm, opts);
  }
  bool Write(const string &filename) const override {
    return Fst<Arc>::WriteFile(filename);
  }
  void InitStateIterator(StateIteratorData<Arc> *data) const override {
    GetImpl()->InitStateIterator(data);
  }
  void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
    GetImpl()->InitArcIterator(s, data);
  }
  void InitMutableArcIterator(StateId s,
                              MutableArcIteratorData<A> *data) override {
    GetMutableImpl()->InitMutableArcIterator(s, data);
  }

 private:
  explicit EditFst(std::shared_ptr<Impl> impl) : ImplToMutableFst<Impl>(impl) {}
  using ImplToFst<Impl, MutableFst<Arc>>::GetImpl;
  using ImplToFst<Impl, MutableFst<Arc>>::GetMutableImpl;
  using ImplToFst<Impl, MutableFst<Arc>>::SetImpl;
};
} // namespace fst
#endif // FST_EDIT_FST_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/topsort.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Topological sort of FSTs.
#ifndef FST_TOPSORT_H_
#define FST_TOPSORT_H_
#include <memory>
#include <vector>
#include <fst/dfs-visit.h>
#include <fst/fst.h>
#include <fst/statesort.h>
namespace fst {
// DFS visitor class to return topological ordering.
template <class Arc>
class TopOrderVisitor {
 public:
  using StateId = typename Arc::StateId;

  // If acyclic, order[i] gives the topological position of StateId i;
  // otherwise it is unchanged. acyclic_ will be true iff the FST has no
  // cycles. The caller retains ownership of the state order vector.
  TopOrderVisitor(std::vector<StateId> *order, bool *acyclic)
      : order_(order), acyclic_(acyclic) {}

  void InitVisit(const Fst<Arc> &fst) {
    finish_.reset(new std::vector<StateId>());
    *acyclic_ = true;
  }

  constexpr bool InitState(StateId, StateId) const { return true; }

  constexpr bool TreeArc(StateId, const Arc &) const { return true; }

  // A back arc implies a cycle; returning false also terminates the visit.
  bool BackArc(StateId, const Arc &) { return (*acyclic_ = false); }

  constexpr bool ForwardOrCrossArc(StateId, const Arc &) const { return true; }

  // Records states in finish-time order.
  void FinishState(StateId s, StateId, const Arc *) { finish_->push_back(s); }

  void FinishVisit() {
    if (*acyclic_) {
      // A state's topological position is its reverse finish-time rank:
      // later-finishing states precede earlier-finishing ones. assign() sizes
      // the output in one shot (no repeated push_back growth), and hoisting
      // the size into a StateId avoids signed/unsigned loop comparisons.
      const StateId num_finished = finish_->size();
      order_->assign(num_finished, kNoStateId);
      for (StateId s = 0; s < num_finished; ++s) {
        (*order_)[(*finish_)[num_finished - s - 1]] = s;
      }
    }
    finish_.reset();
  }

 private:
  std::vector<StateId> *order_;
  bool *acyclic_;
  // States in finish-time order.
  std::unique_ptr<std::vector<StateId>> finish_;
};
// Topologically sorts its input if acyclic, modifying it. Otherwise, the input
// is unchanged. When sorted, all transitions are from lower to higher state
// IDs.
//
// Complexity:
//
//   Time:  O(V + E)
//   Space: O(V + E)
//
// where V is the number of states and E is the number of arcs.
template <class Arc>
bool TopSort(MutableFst<Arc> *fst) {
  std::vector<typename Arc::StateId> order;
  bool acyclic;
  TopOrderVisitor<Arc> visitor(&order, &acyclic);
  DfsVisit(*fst, &visitor);
  if (!acyclic) {
    // Cyclic input: leave the FST unchanged, but record what we learned.
    fst->SetProperties(kCyclic | kNotTopSorted, kCyclic | kNotTopSorted);
    return false;
  }
  StateSort(fst, order);
  fst->SetProperties(kAcyclic | kInitialAcyclic | kTopSorted,
                     kAcyclic | kInitialAcyclic | kTopSorted);
  return true;
}
} // namespace fst
#endif // FST_TOPSORT_H_
| 0 |