Dataset Viewer

| file_name (large_string, length 4 to 140) | prefix (large_string, length 0 to 39k) | suffix (large_string, length 0 to 36.1k) | middle (large_string, length 0 to 29.4k) | fim_type (large_string, 4 values) |
---|---|---|---|---|
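Each row below is one fill-in-the-middle (FIM) example: a source file is split into a prefix, a held-out middle, and a suffix, and `fim_type` records how the middle span was selected (`random_line_split`, `identifier_body`, `identifier_name`, or `conditional_block` in the rows shown here). The sketch below shows how a row maps back to its source file; the `Row` struct and the toy values are assumptions for illustration and are not part of the dataset.

```rust
// Minimal sketch: reassembling the original file from one FIM row.
// Field names mirror the viewer columns above.
struct Row {
    prefix: String,
    middle: String,
    suffix: String,
}

// The original file is the prefix, then the held-out middle, then the suffix.
fn reconstruct(row: &Row) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    // Toy values standing in for a real row.
    let row = Row {
        prefix: "fn add(a: i32, b: i32) -> i32 {\n    ".to_string(),
        middle: "a + b".to_string(),
        suffix: "\n}\n".to_string(),
    };
    print!("{}", reconstruct(&row));
}
```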
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is a subset of b, so a matches everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is a superset of b, so b matches everything a matches. E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?; | }
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
} | Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect() | random_line_split |
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self |
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is a subset of b, so a matches everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is a superset of b, so b matches everything a matches. E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
} | identifier_body |
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn | (e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is a subset of b, so a matches everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is a superset of b, so b matches everything a matches. E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| or | identifier_name |
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() |
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is a subset of b, so a matches everything b matches. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is a superset of b, so b matches everything a matches. E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| {
return Err(serde::de::Error::custom("invalid string index"));
} | conditional_block |
xterm.rs | macro_rules! xterm_colors {
($(
$xterm_num:literal $name:ident ($r:literal, $g:literal, $b:literal)
)*) => {
pub(crate) mod dynamic {
use core::fmt;
#[allow(unused_imports)]
use crate::OwoColorize;
/// Available Xterm colors for use with [`OwoColorize::color`](OwoColorize::color)
/// or [`OwoColorize::on_color`](OwoColorize::on_color)
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum XtermColors {
$(
#[allow(missing_docs)]
$name,
)*
}
impl crate::DynColor for XtermColors {
fn fmt_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[38;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[48;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("38;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("48;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
#[doc(hidden)]
fn get_dyncolors_fg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
#[doc(hidden)]
fn get_dyncolors_bg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
}
impl From<u8> for XtermColors {
fn from(x: u8) -> Self {
match x {
$(
$xterm_num => XtermColors::$name,
)*
}
}
}
| fn from(color: XtermColors) -> Self {
match color {
$(
XtermColors::$name => $xterm_num,
)*
}
}
}
}
$(
#[allow(missing_docs)]
pub struct $name;
impl crate::Color for $name {
const ANSI_FG: &'static str = concat!("\x1b[38;5;", stringify!($xterm_num), "m");
const ANSI_BG: &'static str = concat!("\x1b[48;5;", stringify!($xterm_num), "m");
const RAW_ANSI_BG: &'static str = concat!("48;5;", stringify!($xterm_num));
const RAW_ANSI_FG: &'static str = concat!("38;5;", stringify!($xterm_num));
#[doc(hidden)]
type DynEquivelant = dynamic::XtermColors;
#[doc(hidden)]
const DYN_EQUIVELANT: Self::DynEquivelant = dynamic::XtermColors::$name;
#[doc(hidden)]
fn into_dyncolors() -> crate::DynColors {
crate::DynColors::Xterm(dynamic::XtermColors::$name)
}
}
)*
};
}
xterm_colors! {
0 UserBlack (0,0,0)
1 UserRed (128,0,0)
2 UserGreen (0,128,0)
3 UserYellow (128,128,0)
4 UserBlue (0,0,128)
5 UserMagenta (128,0,128)
6 UserCyan (0,128,128)
7 UserWhite (192,192,192)
8 UserBrightBlack (128,128,128)
9 UserBrightRed (255,0,0)
10 UserBrightGreen (0,255,0)
11 UserBrightYellow (255,255,0)
12 UserBrightBlue (0,0,255)
13 UserBrightMagenta (255,0,255)
14 UserBrightCyan (0,255,255)
15 UserBrightWhite (255,255,255)
16 Black (0,0,0)
17 StratosBlue (0,0,95)
18 NavyBlue (0,0,135)
19 MidnightBlue (0,0,175)
20 DarkBlue (0,0,215)
21 Blue (0,0,255)
22 CamaroneGreen (0,95,0)
23 BlueStone (0,95,95)
24 OrientBlue (0,95,135)
25 EndeavourBlue (0,95,175)
26 ScienceBlue (0,95,215)
27 BlueRibbon (0,95,255)
28 JapaneseLaurel (0,135,0)
29 DeepSeaGreen (0,135,95)
30 Teal (0,135,135)
31 DeepCerulean (0,135,175)
32 LochmaraBlue (0,135,215)
33 AzureRadiance (0,135,255)
34 LightJapaneseLaurel (0,175,0)
35 Jade (0,175,95)
36 PersianGreen (0,175,135)
37 BondiBlue (0,175,175)
38 Cerulean (0,175,215)
39 LightAzureRadiance (0,175,255)
40 DarkGreen (0,215,0)
41 Malachite (0,215,95)
42 CaribbeanGreen (0,215,135)
43 LightCaribbeanGreen (0,215,175)
44 RobinEggBlue (0,215,215)
45 Aqua (0,215,255)
46 Green (0,255,0)
47 DarkSpringGreen (0,255,95)
48 SpringGreen (0,255,135)
49 LightSpringGreen (0,255,175)
50 BrightTurquoise (0,255,215)
51 Cyan (0,255,255)
52 Rosewood (95,0,0)
53 PompadourMagenta (95,0,95)
54 PigmentIndigo (95,0,135)
55 DarkPurple (95,0,175)
56 ElectricIndigo (95,0,215)
57 ElectricPurple (95,0,255)
58 VerdunGreen (95,95,0)
59 ScorpionOlive (95,95,95)
60 Lilac (95,95,135)
61 ScampiIndigo (95,95,175)
62 Indigo (95,95,215)
63 DarkCornflowerBlue (95,95,255)
64 DarkLimeade (95,135,0)
65 GladeGreen (95,135,95)
66 JuniperGreen (95,135,135)
67 HippieBlue (95,135,175)
68 HavelockBlue (95,135,215)
69 CornflowerBlue (95,135,255)
70 Limeade (95,175,0)
71 FernGreen (95,175,95)
72 SilverTree (95,175,135)
73 Tradewind (95,175,175)
74 ShakespeareBlue (95,175,215)
75 DarkMalibuBlue (95,175,255)
76 DarkBrightGreen (95,215,0)
77 DarkPastelGreen (95,215,95)
78 PastelGreen (95,215,135)
79 DownyTeal (95,215,175)
80 Viking (95,215,215)
81 MalibuBlue (95,215,255)
82 BrightGreen (95,255,0)
83 DarkScreaminGreen (95,255,95)
84 ScreaminGreen (95,255,135)
85 DarkAquamarine (95,255,175)
86 Aquamarine (95,255,215)
87 LightAquamarine (95,255,255)
88 Maroon (135,0,0)
89 DarkFreshEggplant (135,0,95)
90 LightFreshEggplant (135,0,135)
91 Purple (135,0,175)
92 ElectricViolet (135,0,215)
93 LightElectricViolet (135,0,255)
94 Brown (135,95,0)
95 CopperRose (135,95,95)
96 StrikemasterPurple (135,95,135)
97 DelugePurple (135,95,175)
98 DarkMediumPurple (135,95,215)
99 DarkHeliotropePurple (135,95,255)
100 Olive (135,135,0)
101 ClayCreekOlive (135,135,95)
102 DarkGray (135,135,135)
103 WildBlueYonder (135,135,175)
104 ChetwodeBlue (135,135,215)
105 SlateBlue (135,135,255)
106 LightLimeade (135,175,0)
107 ChelseaCucumber (135,175,95)
108 BayLeaf (135,175,135)
109 GulfStream (135,175,175)
110 PoloBlue (135,175,215)
111 LightMalibuBlue (135,175,255)
112 Pistachio (135,215,0)
113 LightPastelGreen (135,215,95)
114 DarkFeijoaGreen (135,215,135)
115 VistaBlue (135,215,175)
116 Bermuda (135,215,215)
117 DarkAnakiwaBlue (135,215,255)
118 ChartreuseGreen (135,255,0)
119 LightScreaminGreen (135,255,95)
120 DarkMintGreen (135,255,135)
121 MintGreen (135,255,175)
122 LighterAquamarine (135,255,215)
123 AnakiwaBlue (135,255,255)
124 BrightRed (175,0,0)
125 DarkFlirt (175,0,95)
126 Flirt (175,0,135)
127 LightFlirt (175,0,175)
128 DarkViolet (175,0,215)
129 BrightElectricViolet (175,0,255)
130 RoseofSharonOrange (175,95,0)
131 MatrixPink (175,95,95)
132 TapestryPink (175,95,135)
133 FuchsiaPink (175,95,175)
134 MediumPurple (175,95,215)
135 Heliotrope (175,95,255)
136 PirateGold (175,135,0)
137 MuesliOrange (175,135,95)
138 PharlapPink (175,135,135)
139 Bouquet (175,135,175)
140 Lavender (175,135,215)
141 LightHeliotrope (175,135,255)
142 BuddhaGold (175,175,0)
143 OliveGreen (175,175,95)
144 HillaryOlive (175,175,135)
145 SilverChalice (175,175,175)
146 WistfulLilac (175,175,215)
147 MelroseLilac (175,175,255)
148 RioGrandeGreen (175,215,0)
149 ConiferGreen (175,215,95)
150 Feijoa (175,215,135)
151 PixieGreen (175,215,175)
152 JungleMist (175,215,215)
153 LightAnakiwaBlue (175,215,255)
154 Lime (175,255,0)
155 GreenYellow (175,255,95)
156 LightMintGreen (175,255,135)
157 Celadon (175,255,175)
158 AeroBlue (175,255,215)
159 FrenchPassLightBlue (175,255,255)
160 GuardsmanRed (215,0,0)
161 RazzmatazzCerise (215,0,95)
162 MediumVioletRed (215,0,135)
163 HollywoodCerise (215,0,175)
164 DarkPurplePizzazz (215,0,215)
165 BrighterElectricViolet (215,0,255)
166 TennOrange (215,95,0)
167 RomanOrange (215,95,95)
168 CranberryPink (215,95,135)
169 HopbushPink (215,95,175)
170 Orchid (215,95,215)
171 LighterHeliotrope (215,95,255)
172 MangoTango (215,135,0)
173 Copperfield (215,135,95)
174 SeaPink (215,135,135)
175 CanCanPink (215,135,175)
176 LightOrchid (215,135,215)
177 BrightHeliotrope (215,135,255)
178 DarkCorn (215,175,0)
179 DarkTachaOrange (215,175,95)
180 TanBeige (215,175,135)
181 ClamShell (215,175,175)
182 ThistlePink (215,175,215)
183 Mauve (215,175,255)
184 Corn (215,215,0)
185 TachaOrange (215,215,95)
186 DecoOrange (215,215,135)
187 PaleGoldenrod (215,215,175)
188 AltoBeige (215,215,215)
189 FogPink (215,215,255)
190 ChartreuseYellow (215,255,0)
191 Canary (215,255,95)
192 Honeysuckle (215,255,135)
193 ReefPaleYellow (215,255,175)
194 SnowyMint (215,255,215)
195 OysterBay (215,255,255)
196 Red (255,0,0)
197 DarkRose (255,0,95)
198 Rose (255,0,135)
199 LightHollywoodCerise (255,0,175)
200 PurplePizzazz (255,0,215)
201 Fuchsia (255,0,255)
202 BlazeOrange (255,95,0)
203 BittersweetOrange (255,95,95)
204 WildWatermelon (255,95,135)
205 DarkHotPink (255,95,175)
206 HotPink (255,95,215)
207 PinkFlamingo (255,95,255)
208 FlushOrange (255,135,0)
209 Salmon (255,135,95)
210 VividTangerine (255,135,135)
211 PinkSalmon (255,135,175)
212 DarkLavenderRose (255,135,215)
213 BlushPink (255,135,255)
214 YellowSea (255,175,0)
215 TexasRose (255,175,95)
216 Tacao (255,175,135)
217 Sundown (255,175,175)
218 CottonCandy (255,175,215)
219 LavenderRose (255,175,255)
220 Gold (255,215,0)
221 Dandelion (255,215,95)
222 GrandisCaramel (255,215,135)
223 Caramel (255,215,175)
224 CosmosSalmon (255,215,215)
225 PinkLace (255,215,255)
226 Yellow (255,255,0)
227 LaserLemon (255,255,95)
228 DollyYellow (255,255,135)
229 PortafinoYellow (255,255,175)
230 Cumulus (255,255,215)
231 White (255,255,255)
232 DarkCodGray (8,8,8)
233 CodGray (18,18,18)
234 LightCodGray (28,28,28)
235 DarkMineShaft (38,38,38)
236 MineShaft (48,48,48)
237 LightMineShaft (58,58,58)
238 DarkTundora (68,68,68)
239 Tundora (78,78,78)
240 ScorpionGray (88,88,88)
241 DarkDoveGray (98,98,98)
242 DoveGray (108,108,108)
243 Boulder (118,118,118)
244 Gray (128,128,128)
245 LightGray (138,138,138)
246 DustyGray (148,148,148)
247 NobelGray (158,158,158)
248 DarkSilverChalice (168,168,168)
249 LightSilverChalice (178,178,178)
250 DarkSilver (188,188,188)
251 Silver (198,198,198)
252 DarkAlto (208,208,208)
253 Alto (218,218,218)
254 Mercury (228,228,228)
255 GalleryGray (238,238,238)
} | impl From<XtermColors> for u8 { | random_line_split |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def touchmap(env,event):
#sometimes in a room there are fixed objects which cannot be removed from scene using disable command
#so need to go near them to check distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
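# The sketch below is a hypothetical helper (not used elsewhere in this file) showing one way
# the dictionary returned by gtmap() could be inspected; the key names it prints depend
# entirely on the loaded AI2-THOR scene.
def inspect_obj_grids(obj_grids):
    for key, value in obj_grids.items():
        if key == 'min_pos':
            print('grid origin (min x/z):', value)
        else:
            print(key, '-> grid of shape', value.shape, 'with', int(np.sum(value)), 'nonzero cells')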
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
|
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
    while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
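# Minimal usage sketch for surrounding_patch; the toy grid and agent location below are
# hypothetical values chosen only to illustrate the padding behaviour.
def example_surrounding_patch():
    toy_grid = np.arange(25).reshape(5, 5) #fake 5x5 labeled grid
    patch = surrounding_patch([1, 1], toy_grid, R=3, unreach_value=-1)
    #the returned patch is always (2*R+1)x(2*R+1); cells outside the original grid are filled with unreach_value
    print(patch.shape) #-> (7, 7)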
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
    #two different modes of searching (passing the exact object id is sometimes helpful when multiple objects of the same type exist - e.g. multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
    #the 3d position supplied by the simulator needs to be swapped into grid order - z gets the first position and x gets the second position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
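# Minimal, hedged sketch of calling target_navigation_map on made-up data; the toy room,
# the fake objectId and the agent position are illustrative assumptions only.
def example_target_navigation_map():
    nav = np.ones((8, 8)) #toy 8x8 fully navigable room
    chair = np.zeros((8, 8))
    chair[3:5, 3:5] = 1 #pretend a chair blocks a 2x2 patch of the room
    o_grids = {'nav_space': nav,
               'Chair|+01.00|+00.00|+01.00': chair, #hypothetical objectId
               'min_pos': {'mx': 0.0, 'mz': 0.0}}
    agent = {'x': 0.5, 'z': 0.5}
    m = target_navigation_map(o_grids, 'Chair', agent, grid_size=8)
    print(m.shape) #patch centered on the agent, with unknown/floor/target/obstacle ids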
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
                    #the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
                        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
        if obj!='space' and obj!='bye':
if exists(o_grids,obj):
                overwrite = input("This name is already taken. Do you want to overwrite it? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occured with accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
prettyprint(o_grids[obj])
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
| for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
| identifier_body |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def touchmap(env,event):
#sometimes in a room there are fixed objects which cannot be removed from scene using disable command
#so need to go near them to check distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
| print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
    while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
    #two different modes of searching (passing the exact object id is sometimes helpful when multiple objects of the same type exist - e.g. multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
    #the 3d position supplied by the simulator needs to be swapped into grid order - z gets the first position and x gets the second position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
                    #the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
                        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
        if obj!='space' and obj!='bye':
if exists(o_grids,obj):
                overwrite = input("This name is already taken. Do you want to overwrite it? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occured with accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
prettyprint(o_grids[obj])
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one | if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
| random_line_split |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def touchmap(env,event):
#sometimes in a room there are fixed objects which cannot be removed from scene using disable command
#so need to go near them to check distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
    while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
    #two different modes of searching (passing the exact object id is sometimes helpful when multiple objects of the same type exist - e.g. multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
    #the 3d position supplied by the simulator needs to be swapped into grid order - z gets the first position and x gets the second position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
                    #the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
                        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
        if obj!='space' and obj!='bye':
if exists(o_grids,obj):
                overwrite = input("This name is already taken. Do you want to overwrite it? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occured with accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
|
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
| prettyprint(o_grids[obj]) | conditional_block |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def | (env,event):
#sometimes in a room there are fixed objects which cannot be removed from scene using disable command
#so need to go near them to check distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
    while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
    #two different modes of searching (passing the exact object id is sometimes helpful when multiple objects of the same type exist - e.g. multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
    #the 3d position supplied by the simulator needs to be swapped into grid order - z gets the first position and x gets the second position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
                    #the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
                        mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
        if obj!='space' and obj!='bye':
if exists(o_grids,obj):
                overwrite = input("This name is already taken. Do you want to overwrite it? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occured with accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
prettyprint(o_grids[obj])
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
| touchmap | identifier_name |
corpus_wikipedia.py | from __future__ import print_function
import csv
import os
from sys import maxsize
import pickle
import tensorflow as tf
import numpy as np
import spacy
import constants
import corpus
import preprocessing
import sequence_node_sequence_pb2
import tools
import random
from multiprocessing import Pool
import fnmatch
import ntpath
import re
tf.flags.DEFINE_string(
'corpus_data_input_train', '/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
    'The path to the Wikipedia training corpus CSV file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
    'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
# cut the title (is separated by two spaces from main content)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
        corpus.write_dict(out_filename, mapping, vecs, parser.vocab, constants.vocab_manual)
        print('dump data to: ' + out_filename + '.data ...')
        seq_data.dump(out_filename + '.data')
        print('dump counts to: ' + out_filename + '.count ...')
        counts.dump(out_filename + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
|
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path = out_path + '_' + FLAGS.tree_mode
out_path = out_path + '_articles' + str(FLAGS.max_articles)
out_path = out_path + '_maxdepth' + str(FLAGS.max_depth)
print('output base file name: '+out_path)
nlp = None
nlp = convert_wikipedia(FLAGS.corpus_data_input_train,
out_path,
FLAGS.init_dict_filename,
sentence_processor,
nlp,
#mapping,
#vecs,
max_articles=FLAGS.max_articles,
max_depth=FLAGS.max_depth,
#sample_count=FLAGS.sample_count,
batch_size=FLAGS.article_batch_size)
#print('len(mapping): '+str(len(mapping)))
#print('parse train data ...')
#convert_sick(FLAGS.corpus_data_input_train,
# out_path + '.train',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
#print('parse test data ...')
#convert_sick(FLAGS.corpus_data_input_test,
# out_path + '.test',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
| out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping | identifier_body |
corpus_wikipedia.py | from __future__ import print_function
import csv
import os
from sys import maxsize
import pickle
import tensorflow as tf
import numpy as np
import spacy
import constants
import corpus
import preprocessing
import sequence_node_sequence_pb2
import tools
import random
from multiprocessing import Pool
import fnmatch
import ntpath
import re
tf.flags.DEFINE_string(
'corpus_data_input_train', '/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
    'The path to the Wikipedia training corpus CSV file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
    'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
            # cut off the title (separated from the main content by two spaces)
yield content.split(' ', 1)[1]
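# If the final corpus files (.data, .parent, .mapping, .vecs, .depth, .count) are
# missing, the articles are parsed batch-wise and the embeddings/mapping are
# sorted and filtered by count; afterwards the depth and child-index batch files
# are collected and merged.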
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
        corpus.write_dict(out_filename, mapping, vecs, parser.vocab, constants.vocab_manual)
        print('dump data to: ' + out_filename + '.data ...')
        seq_data.dump(out_filename + '.data')
        print('dump counts to: ' + out_filename + '.count ...')
        counts.dump(out_filename + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
|
return parser
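# Parses max_articles articles in batches of batch_size, dumps one numpy file per
# batch (.data/.parent/.depth/.children) and finally merges the batch files.
# child_idx_offset keeps the child indices global across batches.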
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
                # cap the final batch so no more than max_articles articles are read in total
                'max_articles': min(batch_size, max_articles - offset),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
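# Entry point: derive the output base path from the flags (sentence processor,
# tree mode, article count, max depth) and run the conversion.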
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path = out_path + '_' + FLAGS.tree_mode
out_path = out_path + '_articles' + str(FLAGS.max_articles)
out_path = out_path + '_maxdepth' + str(FLAGS.max_depth)
print('output base file name: '+out_path)
nlp = None
nlp = convert_wikipedia(FLAGS.corpus_data_input_train,
out_path,
FLAGS.init_dict_filename,
sentence_processor,
nlp,
#mapping,
#vecs,
max_articles=FLAGS.max_articles,
max_depth=FLAGS.max_depth,
#sample_count=FLAGS.sample_count,
batch_size=FLAGS.article_batch_size)
#print('len(mapping): '+str(len(mapping)))
#print('parse train data ...')
#convert_sick(FLAGS.corpus_data_input_train,
# out_path + '.train',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
#print('parse test data ...')
#convert_sick(FLAGS.corpus_data_input_test,
# out_path + '.test',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
| if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir) | conditional_block |
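Each preview row above stores one fill-in-the-middle example as (file_name, prefix, middle, suffix, fim_type). As a minimal sketch (column names are taken from the viewer header; the `row` mapping and how it is loaded are assumptions, not shown in this preview), the original source file can be rebuilt by concatenating the three text columns:

# Minimal sketch: rebuild the source text from one row of this dataset.
# `row` is assumed to be a dict-like object with the columns listed in the
# header above; the loading step is not part of this preview.
def reassemble(row):
    # prefix + middle + suffix restores the original file; fim_type only
    # describes what kind of span was held out as the middle
    # (identifier_body, conditional_block, random_line_split, identifier_name, ...).
    return row["prefix"] + row["middle"] + row["suffix"]

For the corpus_wikipedia.py row above, the held-out middle is the two-line conditional block, so the concatenation yields the complete file.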