file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---
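Each row below is one fill-in-the-middle (FIM) example: a single source file split into the `prefix`, `suffix`, and `middle` cells, with `fim_type` recording the split strategy. As a minimal sketch of how such a row fits together (assuming the usual FIM convention that the original file is `prefix + middle + suffix`; the struct and field names here are hypothetical and only mirror the columns above):

```rust
/// Hypothetical row type mirroring the table columns above.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimRow {
    /// Reassemble the original source file from the FIM split
    /// (prefix + middle + suffix, under the usual FIM convention).
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```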
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?; | }
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
} | Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect() | random_line_split |
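The `Expression::dnf` doc comment in the row above warns that conversion to disjunctive normal form can blow up exponentially. A small illustration of why, written as a sketch in the style of that file's test module and assuming its `l` helper and `BitAnd`/`BitOr` impls (three clauses of two literals already produce 2^3 = 8 conjunctions):

```rust
#[test]
fn dnf_blowup_sketch() {
    // Each of the n = 3 disjunctive clauses contributes a factor of 2,
    // so the DNF of (a|b) & (c|d) & (e|f) has 2^3 = 8 conjunctions.
    let expr = (l("a") | l("b")) & (l("c") | l("d")) & (l("e") | l("f"));
    assert_eq!(
        expr.dnf().expression().to_string(),
        "a&c&e|a&c&f|a&d&e|a&d&f|b&c&e|b&c&f|b&d&e|b&d&f"
    );
}
```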
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self |
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
} | identifier_body |
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() {
return Err(serde::de::Error::custom("invalid string index"));
}
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn | (e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| or | identifier_name |
main.rs | use maplit::btreeset;
use reduce::Reduce;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
use std::{
collections::{BTreeMap, BTreeSet},
ops::{BitAnd, BitOr},
};
/// a compact index
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Index {
/// the strings table
strings: BTreeSet<String>,
/// indices in these sets are guaranteed to correspond to strings in the strings table
elements: Vec<BTreeSet<u32>>,
}
impl Serialize for Index {
fn serialize<S: Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
// serialize as a tuple so it is guaranteed that the strings table is before the indices,
// in case we ever want to write a clever visitor that matches without building an AST
// of the deserialized result.
(&self.strings, &self.elements).serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Index {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
let (strings, elements) = <(Vec<String>, Vec<BTreeSet<u32>>)>::deserialize(deserializer)?;
// ensure valid indices
for s in elements.iter() {
for x in s {
if strings.get(*x as usize).is_none() |
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?;
Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect()
}
fn main() {
let strings = (0..5000).map(|i| {
let fizz = i % 3 == 0;
let buzz = i % 5 == 0;
if fizz && buzz {
btreeset!{"fizzbuzz".to_owned(), "com.somecompany.somenamespace.someapp.sometype".to_owned()}
} else if fizz {
btreeset!{"fizz".to_owned(), "org.schema.registry.someothertype".to_owned()}
} else if buzz {
btreeset!{"buzz".to_owned(), "factory.provider.interface.adapter".to_owned()}
} else {
btreeset!{format!("{}", i % 11), "we.like.long.identifiers.because.they.seem.professional".to_owned()}
}
}).collect::<Vec<_>>();
let large = Index::from_elements(&borrow_inner(&strings));
let compressed = compresss_zstd_cbor(&large).unwrap();
let large1: Index = decompress_zstd_cbor(&compressed).unwrap();
assert_eq!(large, large1);
println!("naive cbor {}", serde_cbor::to_vec(&strings).unwrap().len());
println!("index cbor {}", serde_cbor::to_vec(&large).unwrap().len());
println!("compressed {}", compressed.len());
let index = Index::from_elements(&[
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let text = serde_json::to_string(&index).unwrap();
println!("{:?}", index);
println!("{}", text);
let expr = l("a") | l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("a") & l("b");
println!("{:?}", index.matching(expr.dnf()));
let expr = l("c") & l("d");
println!("{:?}", index.matching(expr.dnf()));
}
| {
return Err(serde::de::Error::custom("invalid string index"));
} | conditional_block |
xterm.rs | macro_rules! xterm_colors {
($(
$xterm_num:literal $name:ident ($r:literal, $g:literal, $b:literal)
)*) => {
pub(crate) mod dynamic {
use core::fmt;
#[allow(unused_imports)]
use crate::OwoColorize;
/// Available Xterm colors for use with [`OwoColorize::color`](OwoColorize::color)
/// or [`OwoColorize::on_color`](OwoColorize::on_color)
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum XtermColors {
$(
#[allow(missing_docs)]
$name,
)*
}
impl crate::DynColor for XtermColors {
fn fmt_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[38;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("\x1b[48;5;", stringify!($xterm_num), "m"),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_fg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("38;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
fn fmt_raw_ansi_bg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let color = match self {
$(
XtermColors::$name => concat!("48;5;", stringify!($xterm_num)),
)*
};
f.write_str(color)
}
#[doc(hidden)]
fn get_dyncolors_fg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
#[doc(hidden)]
fn get_dyncolors_bg(&self) -> crate::DynColors {
crate::DynColors::Xterm(*self)
}
}
impl From<u8> for XtermColors {
fn from(x: u8) -> Self {
match x {
$(
$xterm_num => XtermColors::$name,
)*
}
}
}
| fn from(color: XtermColors) -> Self {
match color {
$(
XtermColors::$name => $xterm_num,
)*
}
}
}
}
$(
#[allow(missing_docs)]
pub struct $name;
impl crate::Color for $name {
const ANSI_FG: &'static str = concat!("\x1b[38;5;", stringify!($xterm_num), "m");
const ANSI_BG: &'static str = concat!("\x1b[48;5;", stringify!($xterm_num), "m");
const RAW_ANSI_BG: &'static str = concat!("48;5;", stringify!($xterm_num));
const RAW_ANSI_FG: &'static str = concat!("38;5;", stringify!($xterm_num));
#[doc(hidden)]
type DynEquivelant = dynamic::XtermColors;
#[doc(hidden)]
const DYN_EQUIVELANT: Self::DynEquivelant = dynamic::XtermColors::$name;
#[doc(hidden)]
fn into_dyncolors() -> crate::DynColors {
crate::DynColors::Xterm(dynamic::XtermColors::$name)
}
}
)*
};
}
xterm_colors! {
0 UserBlack (0,0,0)
1 UserRed (128,0,0)
2 UserGreen (0,128,0)
3 UserYellow (128,128,0)
4 UserBlue (0,0,128)
5 UserMagenta (128,0,128)
6 UserCyan (0,128,128)
7 UserWhite (192,192,192)
8 UserBrightBlack (128,128,128)
9 UserBrightRed (255,0,0)
10 UserBrightGreen (0,255,0)
11 UserBrightYellow (255,255,0)
12 UserBrightBlue (0,0,255)
13 UserBrightMagenta (255,0,255)
14 UserBrightCyan (0,255,255)
15 UserBrightWhite (255,255,255)
16 Black (0,0,0)
17 StratosBlue (0,0,95)
18 NavyBlue (0,0,135)
19 MidnightBlue (0,0,175)
20 DarkBlue (0,0,215)
21 Blue (0,0,255)
22 CamaroneGreen (0,95,0)
23 BlueStone (0,95,95)
24 OrientBlue (0,95,135)
25 EndeavourBlue (0,95,175)
26 ScienceBlue (0,95,215)
27 BlueRibbon (0,95,255)
28 JapaneseLaurel (0,135,0)
29 DeepSeaGreen (0,135,95)
30 Teal (0,135,135)
31 DeepCerulean (0,135,175)
32 LochmaraBlue (0,135,215)
33 AzureRadiance (0,135,255)
34 LightJapaneseLaurel (0,175,0)
35 Jade (0,175,95)
36 PersianGreen (0,175,135)
37 BondiBlue (0,175,175)
38 Cerulean (0,175,215)
39 LightAzureRadiance (0,175,255)
40 DarkGreen (0,215,0)
41 Malachite (0,215,95)
42 CaribbeanGreen (0,215,135)
43 LightCaribbeanGreen (0,215,175)
44 RobinEggBlue (0,215,215)
45 Aqua (0,215,255)
46 Green (0,255,0)
47 DarkSpringGreen (0,255,95)
48 SpringGreen (0,255,135)
49 LightSpringGreen (0,255,175)
50 BrightTurquoise (0,255,215)
51 Cyan (0,255,255)
52 Rosewood (95,0,0)
53 PompadourMagenta (95,0,95)
54 PigmentIndigo (95,0,135)
55 DarkPurple (95,0,175)
56 ElectricIndigo (95,0,215)
57 ElectricPurple (95,0,255)
58 VerdunGreen (95,95,0)
59 ScorpionOlive (95,95,95)
60 Lilac (95,95,135)
61 ScampiIndigo (95,95,175)
62 Indigo (95,95,215)
63 DarkCornflowerBlue (95,95,255)
64 DarkLimeade (95,135,0)
65 GladeGreen (95,135,95)
66 JuniperGreen (95,135,135)
67 HippieBlue (95,135,175)
68 HavelockBlue (95,135,215)
69 CornflowerBlue (95,135,255)
70 Limeade (95,175,0)
71 FernGreen (95,175,95)
72 SilverTree (95,175,135)
73 Tradewind (95,175,175)
74 ShakespeareBlue (95,175,215)
75 DarkMalibuBlue (95,175,255)
76 DarkBrightGreen (95,215,0)
77 DarkPastelGreen (95,215,95)
78 PastelGreen (95,215,135)
79 DownyTeal (95,215,175)
80 Viking (95,215,215)
81 MalibuBlue (95,215,255)
82 BrightGreen (95,255,0)
83 DarkScreaminGreen (95,255,95)
84 ScreaminGreen (95,255,135)
85 DarkAquamarine (95,255,175)
86 Aquamarine (95,255,215)
87 LightAquamarine (95,255,255)
88 Maroon (135,0,0)
89 DarkFreshEggplant (135,0,95)
90 LightFreshEggplant (135,0,135)
91 Purple (135,0,175)
92 ElectricViolet (135,0,215)
93 LightElectricViolet (135,0,255)
94 Brown (135,95,0)
95 CopperRose (135,95,95)
96 StrikemasterPurple (135,95,135)
97 DelugePurple (135,95,175)
98 DarkMediumPurple (135,95,215)
99 DarkHeliotropePurple (135,95,255)
100 Olive (135,135,0)
101 ClayCreekOlive (135,135,95)
102 DarkGray (135,135,135)
103 WildBlueYonder (135,135,175)
104 ChetwodeBlue (135,135,215)
105 SlateBlue (135,135,255)
106 LightLimeade (135,175,0)
107 ChelseaCucumber (135,175,95)
108 BayLeaf (135,175,135)
109 GulfStream (135,175,175)
110 PoloBlue (135,175,215)
111 LightMalibuBlue (135,175,255)
112 Pistachio (135,215,0)
113 LightPastelGreen (135,215,95)
114 DarkFeijoaGreen (135,215,135)
115 VistaBlue (135,215,175)
116 Bermuda (135,215,215)
117 DarkAnakiwaBlue (135,215,255)
118 ChartreuseGreen (135,255,0)
119 LightScreaminGreen (135,255,95)
120 DarkMintGreen (135,255,135)
121 MintGreen (135,255,175)
122 LighterAquamarine (135,255,215)
123 AnakiwaBlue (135,255,255)
124 BrightRed (175,0,0)
125 DarkFlirt (175,0,95)
126 Flirt (175,0,135)
127 LightFlirt (175,0,175)
128 DarkViolet (175,0,215)
129 BrightElectricViolet (175,0,255)
130 RoseofSharonOrange (175,95,0)
131 MatrixPink (175,95,95)
132 TapestryPink (175,95,135)
133 FuchsiaPink (175,95,175)
134 MediumPurple (175,95,215)
135 Heliotrope (175,95,255)
136 PirateGold (175,135,0)
137 MuesliOrange (175,135,95)
138 PharlapPink (175,135,135)
139 Bouquet (175,135,175)
140 Lavender (175,135,215)
141 LightHeliotrope (175,135,255)
142 BuddhaGold (175,175,0)
143 OliveGreen (175,175,95)
144 HillaryOlive (175,175,135)
145 SilverChalice (175,175,175)
146 WistfulLilac (175,175,215)
147 MelroseLilac (175,175,255)
148 RioGrandeGreen (175,215,0)
149 ConiferGreen (175,215,95)
150 Feijoa (175,215,135)
151 PixieGreen (175,215,175)
152 JungleMist (175,215,215)
153 LightAnakiwaBlue (175,215,255)
154 Lime (175,255,0)
155 GreenYellow (175,255,95)
156 LightMintGreen (175,255,135)
157 Celadon (175,255,175)
158 AeroBlue (175,255,215)
159 FrenchPassLightBlue (175,255,255)
160 GuardsmanRed (215,0,0)
161 RazzmatazzCerise (215,0,95)
162 MediumVioletRed (215,0,135)
163 HollywoodCerise (215,0,175)
164 DarkPurplePizzazz (215,0,215)
165 BrighterElectricViolet (215,0,255)
166 TennOrange (215,95,0)
167 RomanOrange (215,95,95)
168 CranberryPink (215,95,135)
169 HopbushPink (215,95,175)
170 Orchid (215,95,215)
171 LighterHeliotrope (215,95,255)
172 MangoTango (215,135,0)
173 Copperfield (215,135,95)
174 SeaPink (215,135,135)
175 CanCanPink (215,135,175)
176 LightOrchid (215,135,215)
177 BrightHeliotrope (215,135,255)
178 DarkCorn (215,175,0)
179 DarkTachaOrange (215,175,95)
180 TanBeige (215,175,135)
181 ClamShell (215,175,175)
182 ThistlePink (215,175,215)
183 Mauve (215,175,255)
184 Corn (215,215,0)
185 TachaOrange (215,215,95)
186 DecoOrange (215,215,135)
187 PaleGoldenrod (215,215,175)
188 AltoBeige (215,215,215)
189 FogPink (215,215,255)
190 ChartreuseYellow (215,255,0)
191 Canary (215,255,95)
192 Honeysuckle (215,255,135)
193 ReefPaleYellow (215,255,175)
194 SnowyMint (215,255,215)
195 OysterBay (215,255,255)
196 Red (255,0,0)
197 DarkRose (255,0,95)
198 Rose (255,0,135)
199 LightHollywoodCerise (255,0,175)
200 PurplePizzazz (255,0,215)
201 Fuchsia (255,0,255)
202 BlazeOrange (255,95,0)
203 BittersweetOrange (255,95,95)
204 WildWatermelon (255,95,135)
205 DarkHotPink (255,95,175)
206 HotPink (255,95,215)
207 PinkFlamingo (255,95,255)
208 FlushOrange (255,135,0)
209 Salmon (255,135,95)
210 VividTangerine (255,135,135)
211 PinkSalmon (255,135,175)
212 DarkLavenderRose (255,135,215)
213 BlushPink (255,135,255)
214 YellowSea (255,175,0)
215 TexasRose (255,175,95)
216 Tacao (255,175,135)
217 Sundown (255,175,175)
218 CottonCandy (255,175,215)
219 LavenderRose (255,175,255)
220 Gold (255,215,0)
221 Dandelion (255,215,95)
222 GrandisCaramel (255,215,135)
223 Caramel (255,215,175)
224 CosmosSalmon (255,215,215)
225 PinkLace (255,215,255)
226 Yellow (255,255,0)
227 LaserLemon (255,255,95)
228 DollyYellow (255,255,135)
229 PortafinoYellow (255,255,175)
230 Cumulus (255,255,215)
231 White (255,255,255)
232 DarkCodGray (8,8,8)
233 CodGray (18,18,18)
234 LightCodGray (28,28,28)
235 DarkMineShaft (38,38,38)
236 MineShaft (48,48,48)
237 LightMineShaft (58,58,58)
238 DarkTundora (68,68,68)
239 Tundora (78,78,78)
240 ScorpionGray (88,88,88)
241 DarkDoveGray (98,98,98)
242 DoveGray (108,108,108)
243 Boulder (118,118,118)
244 Gray (128,128,128)
245 LightGray (138,138,138)
246 DustyGray (148,148,148)
247 NobelGray (158,158,158)
248 DarkSilverChalice (168,168,168)
249 LightSilverChalice (178,178,178)
250 DarkSilver (188,188,188)
251 Silver (198,198,198)
252 DarkAlto (208,208,208)
253 Alto (218,218,218)
254 Mercury (228,228,228)
255 GalleryGray (238,238,238)
} | impl From<XtermColors> for u8 { | random_line_split |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
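#locate the trajectory json for the given room number, task index and trial index in the ALFRED train split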
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def touchmap(env,event):
#sometimes in a room there are fixed objects which cannot be removed from the scene using the disable command
#so we need to go near them to check the distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
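#enable one object at a time: cells that stay reachable are cleared, so each object's grid keeps only the cells that object blocks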
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
|
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
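#builds an egocentric label map around the agent: unk_id marks unseen cells, flr_id navigable floor, tar_id the target object and obs_id obstacles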
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
#the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
if obj!='space' and obj!='bye':
if exists(o_grids,obj):
overwrite = input("This name is already taken want to overwrite ? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occurred while accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
prettyprint(o_grids[obj])
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
| for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
| identifier_body |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def touchmap(env,event):
#sometimes in a room there are fixed objects which cannot be removed from the scene using the disable command
#so we need to go near them to check the distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
| print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
#the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
if obj!='space' and obj!='bye':
if exists(o_grids,obj):
overwrite = input("This name is already taken want to overwrite ? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occurred while accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
prettyprint(o_grids[obj])
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one | if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
| random_line_split |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def touchmap(env,event):
#sometimes in a room there are fixed objects which cannot be removed from the scene using the disable command
#so we need to go near them to check the distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
#the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
if obj!='space' and obj!='bye':
if exists(o_grids,obj):
overwrite = input("This name is already taken want to overwrite ? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occurred while accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
|
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
| prettyprint(o_grids[obj]) | conditional_block |
gtmaps.py | import numpy as np
import math
import sys
import glob
import os
import json
import random
import copy
from skimage.measure import regionprops, label
def get_file(rn = 302, task_index = 1, trial_num = 0):
folders = sorted(glob.glob('/home/hom/alfred/data/json_2.1.0/train/*'+repr(rn))) #for home computer
print("Number of demonstrated tasks for this room ",len(folders))
trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks
print("Number of different trials (language instr) for the same task ",len(trials))
traj = glob.glob(trials[trial_num]+'/*.json')
print("got trajectory file ",traj)
return traj
def | (env,event):
#sometimes in a room there are fixed objects which cannot be removed from the scene using the disable command
#so we need to go near them to check the distance and then map them
return
def gtmap(env,event):
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of the agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
#the final element in the dictionary is not a numpy array; it stores the min grid position of the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank rows to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
if obj!='space' and obj!='bye':
if exists(o_grids,obj):
overwrite = input("This name is already taken want to overwrite ? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occurred while accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
prettyprint(o_grids[obj])
else:
prettyprint(o_grids[obj+'|'])
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
| touchmap | identifier_name |
corpus_wikipedia.py | from __future__ import print_function
import csv
import os
from sys import maxsize
import pickle
import tensorflow as tf
import numpy as np
import spacy
import constants
import corpus
import preprocessing
import sequence_node_sequence_pb2
import tools
import random
from multiprocessing import Pool
import fnmatch
import ntpath
import re
tf.flags.DEFINE_string(
'corpus_data_input_train', '/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
'The path to the Wikipedia articles csv file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
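#generator over the wikipedia csv dump: skips the first `skip` rows, then yields up to max_articles article bodies (title removed)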
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
# cut the title (is separated by two spaces from main content)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_filename, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_filename + '.data ...')
seq_data.dump(out_filename + '.data')
print('dump counts to: ' + out_filename + '.count ...')
counts.dump(out_filename + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
|
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path = out_path + '_' + FLAGS.tree_mode
out_path = out_path + '_articles' + str(FLAGS.max_articles)
out_path = out_path + '_maxdepth' + str(FLAGS.max_depth)
print('output base file name: '+out_path)
nlp = None
nlp = convert_wikipedia(FLAGS.corpus_data_input_train,
out_path,
FLAGS.init_dict_filename,
sentence_processor,
nlp,
#mapping,
#vecs,
max_articles=FLAGS.max_articles,
max_depth=FLAGS.max_depth,
#sample_count=FLAGS.sample_count,
batch_size=FLAGS.article_batch_size)
#print('len(mapping): '+str(len(mapping)))
#print('parse train data ...')
#convert_sick(FLAGS.corpus_data_input_train,
# out_path + '.train',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
#print('parse test data ...')
#convert_sick(FLAGS.corpus_data_input_test,
# out_path + '.test',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
| out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping | identifier_body |
corpus_wikipedia.py | from __future__ import print_function
import csv
import os
from sys import maxsize
import pickle
import tensorflow as tf
import numpy as np
import spacy
import constants
import corpus
import preprocessing
import sequence_node_sequence_pb2
import tools
import random
from multiprocessing import Pool
import fnmatch
import ntpath
import re
tf.flags.DEFINE_string(
'corpus_data_input_train', '/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
'The path to the Wikipedia articles csv file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
            # cut off the title (it is separated from the main content by two spaces)
yield content.split(' ', 1)[1]
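# Minimal usage sketch for the reader above (hypothetical, e.g. for a smoke test):
#
#   for text in articles_from_csv_reader(FLAGS.corpus_data_input_train,
#                                         max_articles=2, skip=0):
#       print(text[:80])
#
# It yields one unicode article body per CSV row, with the title already cut off.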
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
|
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path = out_path + '_' + FLAGS.tree_mode
out_path = out_path + '_articles' + str(FLAGS.max_articles)
out_path = out_path + '_maxdepth' + str(FLAGS.max_depth)
print('output base file name: '+out_path)
nlp = None
nlp = convert_wikipedia(FLAGS.corpus_data_input_train,
out_path,
FLAGS.init_dict_filename,
sentence_processor,
nlp,
#mapping,
#vecs,
max_articles=FLAGS.max_articles,
max_depth=FLAGS.max_depth,
#sample_count=FLAGS.sample_count,
batch_size=FLAGS.article_batch_size)
#print('len(mapping): '+str(len(mapping)))
#print('parse train data ...')
#convert_sick(FLAGS.corpus_data_input_train,
# out_path + '.train',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
#print('parse test data ...')
#convert_sick(FLAGS.corpus_data_input_test,
# out_path + '.test',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
| if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir) | conditional_block |
corpus_wikipedia.py | from __future__ import print_function
import csv
import os
from sys import maxsize
import pickle
import tensorflow as tf
import numpy as np
import spacy
import constants
import corpus
import preprocessing
import sequence_node_sequence_pb2
import tools
import random
from multiprocessing import Pool
import fnmatch
import ntpath
import re
tf.flags.DEFINE_string(
'corpus_data_input_train', '/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
    'The path to the Wikipedia articles CSV input file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
    'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
    + '"aggregate" -> parents point to an added, artificial token (TERMINATOR) at the end of the token sequence, '
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
            # cut off the title (it is separated from the main content by two spaces)
yield content.split(' ', 1)[1]
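# Pipeline summary of convert_wikipedia below (condensed from the code, added here
# as orientation): (1) load spaCy and an existing or fresh (vecs, mapping) dict,
# (2) parse the articles batch-wise into data/parent/depth/children arrays,
# (3) sort and filter the embeddings by token counts (count_threshold) and dump the
# .data/.count files together with the dictionary, (4) collect depths, rearrange the
# children indices and merge the per-batch children files into one file per depth.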
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries! | articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path = out_path + '_' + FLAGS.tree_mode
out_path = out_path + '_articles' + str(FLAGS.max_articles)
out_path = out_path + '_maxdepth' + str(FLAGS.max_depth)
print('output base file name: '+out_path)
nlp = None
nlp = convert_wikipedia(FLAGS.corpus_data_input_train,
out_path,
FLAGS.init_dict_filename,
sentence_processor,
nlp,
#mapping,
#vecs,
max_articles=FLAGS.max_articles,
max_depth=FLAGS.max_depth,
#sample_count=FLAGS.sample_count,
batch_size=FLAGS.article_batch_size)
#print('len(mapping): '+str(len(mapping)))
#print('parse train data ...')
#convert_sick(FLAGS.corpus_data_input_train,
# out_path + '.train',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
#print('parse test data ...')
#convert_sick(FLAGS.corpus_data_input_test,
# out_path + '.test',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode) | #if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2( | random_line_split |
corpus_wikipedia.py | from __future__ import print_function
import csv
import os
from sys import maxsize
import pickle
import tensorflow as tf
import numpy as np
import spacy
import constants
import corpus
import preprocessing
import sequence_node_sequence_pb2
import tools
import random
from multiprocessing import Pool
import fnmatch
import ntpath
import re
tf.flags.DEFINE_string(
'corpus_data_input_train', '/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
    'The path to the Wikipedia articles CSV input file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
    'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
    + '"aggregate" -> parents point to an added, artificial token (TERMINATOR) at the end of the token sequence, '
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
            # cut off the title (it is separated from the main content by two spaces)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def | (out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path = out_path + '_' + FLAGS.tree_mode
out_path = out_path + '_articles' + str(FLAGS.max_articles)
out_path = out_path + '_maxdepth' + str(FLAGS.max_depth)
print('output base file name: '+out_path)
nlp = None
nlp = convert_wikipedia(FLAGS.corpus_data_input_train,
out_path,
FLAGS.init_dict_filename,
sentence_processor,
nlp,
#mapping,
#vecs,
max_articles=FLAGS.max_articles,
max_depth=FLAGS.max_depth,
#sample_count=FLAGS.sample_count,
batch_size=FLAGS.article_batch_size)
#print('len(mapping): '+str(len(mapping)))
#print('parse train data ...')
#convert_sick(FLAGS.corpus_data_input_train,
# out_path + '.train',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
#print('parse test data ...')
#convert_sick(FLAGS.corpus_data_input_test,
# out_path + '.test',
# sentence_processor,
# nlp,
# mapping,
# FLAGS.corpus_size,
# FLAGS.tree_mode)
| parse_articles | identifier_name |
sync.go | package diskrsync
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"log"
"math"
"github.com/dop251/spgz"
"golang.org/x/crypto/blake2b"
)
const (
hdrMagic = "BSNC0002"
)
const (
hashSize = 64
DefTargetBlockSize = 128 * 1024
)
const (
cmdHole byte = iota
cmdBlock
cmdEqual
cmdNotEqual
)
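// Wire protocol sketch (summarised from Source and Target below, not a normative
// spec): after both sides exchange a 16-byte header (hdrMagic plus a little-endian
// uint64 size), the target walks its hash tree and sends the 64-byte BLAKE2b sum of
// every node it visits; the source answers each sum with one command byte:
//
//	cmdEqual    - the subtree matches, skip it
//	cmdNotEqual - inner node: descend into the children; leaf: raw block data follows
//	cmdHole     - the leaf consists of zeros only, the target punches a hole instead
//
// For the tail beyond the common size the source streams cmdBlock followed by a
// block of literal data, or cmdHole followed by a little-endian int64 hole length.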
var (
ErrInvalidFormat = errors.New("invalid data format")
)
type ProgressListener interface {
Start(size int64)
Update(position int64)
}
type hashPool []hash.Hash
type workCtx struct {
buf []byte
n *node
hash hash.Hash
avail, hashReady chan struct{}
}
type node struct {
buf [hashSize]byte
parent *node
idx int
children []*node
size int
hash hash.Hash
sum []byte
}
type tree struct {
root *node
size int64
reader io.ReadSeeker
useBuffer bool
}
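// The tree is shaped by calc() so that each leaf covers roughly DefTargetBlockSize
// bytes of the input. Leaves hash their data directly; every inner node hashes the
// concatenation of its children's sums (see node.childReady), so equal root sums
// are taken to mean the whole covered range matches.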
type base struct {
t tree
buf []byte
cmdReader io.Reader
cmdWriter io.Writer
syncProgressListener ProgressListener
}
type source struct {
base
reader io.ReadSeeker
}
type target struct {
base
writer *batchingWriter
}
// batchingWriter accumulates successive writes into a large buffer so that writes to the
// underlying spgz.SpgzFile cover whole compressed blocks, which avoids reading and
// unpacking them before writing.
type batchingWriter struct {
writer spgz.SparseFile
maxSize int
offset int64
holeSize int64
buf []byte
}
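// Behaviour sketch: consecutive Write calls are buffered until maxSize bytes have
// accumulated and are then issued as a single WriteAt at the tracked offset; WriteHole
// only grows holeSize, and the hole is punched with one PunchHole call on the next
// Flush, on a Seek to a different position, or when ordinary writing resumes.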
func (w *batchingWriter) | () error {
if w.holeSize > 0 {
err := w.writer.PunchHole(w.offset, w.holeSize)
if err == nil {
w.offset += w.holeSize
w.holeSize = 0
}
return err
}
if len(w.buf) == 0 {
return nil
}
n, err := w.writer.WriteAt(w.buf, w.offset)
if err != nil {
return err
}
w.buf = w.buf[:0]
w.offset += int64(n)
return nil
}
func (w *batchingWriter) prepareWrite() error {
if w.holeSize > 0 {
err := w.Flush()
if err != nil {
return err
}
}
if cap(w.buf) < w.maxSize {
buf := make([]byte, w.maxSize)
copy(buf, w.buf)
w.buf = buf[:len(w.buf)]
}
return nil
}
func (w *batchingWriter) Write(p []byte) (int, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
written := 0
for len(p) > 0 {
if len(p) >= w.maxSize && len(w.buf) == 0 {
residue := len(p) % w.maxSize
n, err := w.writer.WriteAt(p[:len(p)-residue], w.offset)
written += n
w.offset += int64(n)
if err != nil {
return written, err
}
p = p[n:]
} else {
n := copy(w.buf[len(w.buf):w.maxSize], p)
w.buf = w.buf[:len(w.buf)+n]
if len(w.buf) == w.maxSize {
n1, err := w.writer.WriteAt(w.buf, w.offset)
w.offset += int64(n1)
n2 := n1 - (len(w.buf) - n)
w.buf = w.buf[:0]
if n2 < 0 {
n2 = 0
}
written += n2
if err != nil {
return written, err
}
} else {
written += n
}
p = p[n:]
}
}
return written, nil
}
func (w *batchingWriter) ReadFrom(src io.Reader) (int64, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
var read int64
for {
n, err := src.Read(w.buf[len(w.buf):w.maxSize])
read += int64(n)
w.buf = w.buf[:len(w.buf)+n]
if err == io.EOF {
return read, nil
}
if err != nil {
return read, err
}
if len(w.buf) == w.maxSize {
err = w.Flush()
if err != nil {
return read, err
}
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems[workIdx].n = nil
workItems[workIdx].hashReady <- struct{}{}
if rr < t.size {
return fmt.Errorf("read less data (%d) than expected (%d)", rr, t.size)
}
return nil
}
func readHeader(reader io.Reader) (size int64, err error) {
buf := make([]byte, len(hdrMagic)+8)
_, err = io.ReadFull(reader, buf)
if err != nil {
return
}
if string(buf[:len(hdrMagic)]) != hdrMagic {
err = ErrInvalidFormat
return
}
size = int64(binary.LittleEndian.Uint64(buf[len(hdrMagic):]))
return
}
func writeHeader(writer io.Writer, size int64) (err error) {
buf := make([]byte, len(hdrMagic)+8)
copy(buf, hdrMagic)
binary.LittleEndian.PutUint64(buf[len(hdrMagic):], uint64(size))
_, err = writer.Write(buf)
return
}
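// Illustrative header bytes: for size = 128 KiB (0x20000) the 16-byte header is
// "BSNC0002" followed by 00 00 02 00 00 00 00 00 (the size as little-endian uint64).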
func Source(reader io.ReadSeeker, size int64, cmdReader io.Reader, cmdWriter io.Writer, useBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
err = writeHeader(cmdWriter, size)
if err != nil {
return
}
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return fmt.Errorf("could not read header: %w", err)
}
var commonSize int64
if remoteSize < size {
commonSize = remoteSize
} else {
commonSize = size
}
if commonSize > 0 {
s := source{
base: base{
t: tree{
reader: reader,
size: commonSize,
useBuffer: useBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
reader: reader,
}
err = s.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
s.syncProgressListener = syncPl
syncPl.Start(size)
}
err = s.subtree(s.t.root, 0, commonSize)
if err != nil {
return
}
} else {
if syncPl != nil {
syncPl.Start(size)
}
}
if size > commonSize {
// Write the tail
_, err = reader.Seek(commonSize, io.SeekStart)
if err != nil {
return
}
holeStart := int64(-1)
curPos := commonSize
buf := make([]byte, DefTargetBlockSize)
bw := bufio.NewWriterSize(cmdWriter, DefTargetBlockSize*2)
for {
var r int
var stop bool
r, err = io.ReadFull(reader, buf)
if err != nil {
if err == io.EOF {
break
}
if err != io.ErrUnexpectedEOF {
return fmt.Errorf("source, reading tail: %w", err)
}
buf = buf[:r]
stop = true
err = nil
}
if spgz.IsBlockZero(buf) {
if holeStart == -1 {
holeStart = curPos
}
} else {
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
holeStart = -1
}
err = bw.WriteByte(cmdBlock)
if err != nil {
return
}
_, err = bw.Write(buf)
if err != nil {
return
}
}
if err != nil {
return
}
curPos += int64(r)
if syncPl != nil {
syncPl.Update(curPos)
}
if stop {
break
}
}
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
}
err = bw.Flush()
}
return
}
func (s *source) subtree(root *node, offset, size int64) (err error) {
remoteHash := make([]byte, hashSize)
_, err = io.ReadFull(s.cmdReader, remoteHash)
if err != nil {
return fmt.Errorf("source/subtree, reading hash: %w", err)
}
if bytes.Equal(root.sum, remoteHash) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdEqual)
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
return
}
if root.size > 0 {
// log.Printf("Blocks at %d don't match\n", offset)
if int64(root.size) != size {
panic("Leaf node size mismatch")
}
_, err = s.reader.Seek(offset, io.SeekStart)
if err != nil {
return
}
buf := s.buffer(size)
_, err = io.ReadFull(s.reader, buf)
if err != nil {
return fmt.Errorf("source read failed at %d: %w", offset, err)
}
if spgz.IsBlockZero(buf) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdHole)
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
_, err = s.cmdWriter.Write(buf)
}
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
b := offset
order := byte(len(root.children))
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = s.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
return
}
type TargetFile interface {
io.ReadWriteSeeker
io.WriterAt
io.Closer
spgz.Truncatable
}
// FixingSpgzFileWrapper conceals read errors caused by compressed data corruption by re-writing the corrupt
// blocks with zeros. Such errors are usually caused by abrupt termination of the writing process.
// This wrapper is used as the sync target so the corrupt blocks will be updated during the sync process.
type FixingSpgzFileWrapper struct {
*spgz.SpgzFile
}
func (rw *FixingSpgzFileWrapper) checkErr(err error) error {
var ce *spgz.ErrCorruptCompressedBlock
if errors.As(err, &ce) {
if ce.Size() == 0 {
return rw.SpgzFile.Truncate(ce.Offset())
}
buf := make([]byte, ce.Size())
_, err = rw.SpgzFile.WriteAt(buf, ce.Offset())
}
return err
}
func (rw *FixingSpgzFileWrapper) Read(p []byte) (n int, err error) {
for n == 0 && err == nil { // avoid returning (0, nil) after a fix
n, err = rw.SpgzFile.Read(p)
if err != nil {
err = rw.checkErr(err)
}
}
return
}
func (rw *FixingSpgzFileWrapper) Seek(offset int64, whence int) (int64, error) {
o, err := rw.SpgzFile.Seek(offset, whence)
if err != nil {
err = rw.checkErr(err)
if err == nil {
o, err = rw.SpgzFile.Seek(offset, whence)
}
}
return o, err
}
func Target(writer spgz.SparseFile, size int64, cmdReader io.Reader, cmdWriter io.Writer, useReadBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
ch := make(chan error)
go func() {
ch <- writeHeader(cmdWriter, size)
}()
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return
}
err = <-ch
if err != nil {
return
}
commonSize := size
if remoteSize < commonSize {
commonSize = remoteSize
}
if commonSize > 0 {
t := target{
base: base{
t: tree{
reader: writer,
size: commonSize,
useBuffer: useReadBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
writer: &batchingWriter{writer: writer, maxSize: DefTargetBlockSize * 16},
}
err = t.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
t.syncProgressListener = syncPl
syncPl.Start(remoteSize)
}
err = t.subtree(t.t.root, 0, commonSize)
if err != nil {
return
}
err = t.writer.Flush()
if err != nil {
return
}
if syncPl != nil {
syncPl.Update(commonSize)
}
} else {
if syncPl != nil {
syncPl.Start(remoteSize)
}
}
if size < remoteSize {
// Read the tail
pos := commonSize
_, err = writer.Seek(pos, io.SeekStart)
if err != nil {
return
}
hole := false
rd := bufio.NewReaderSize(cmdReader, DefTargetBlockSize*2)
for {
var cmd byte
cmd, err = rd.ReadByte()
if err != nil {
if err == io.EOF {
err = nil
break
}
return fmt.Errorf("target: while reading tail block header: %w", err)
}
if cmd == cmdBlock {
var n int64
n, err = io.CopyN(writer, rd, DefTargetBlockSize)
pos += n
hole = false
if err != nil {
if err == io.EOF {
err = nil
if syncPl != nil {
syncPl.Update(pos)
}
break
} else {
return fmt.Errorf("target: while copying block: %w", err)
}
}
} else {
if cmd == cmdHole {
var holeSize int64
err = binary.Read(rd, binary.LittleEndian, &holeSize)
if err != nil {
return fmt.Errorf("target: while reading hole size: %w", err)
}
_, err = writer.Seek(holeSize, io.SeekCurrent)
if err != nil {
return
}
hole = true
pos += holeSize
} else {
return fmt.Errorf("unexpected cmd: %d", cmd)
}
}
if syncPl != nil {
syncPl.Update(pos)
}
}
if hole {
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(remoteSize)
}
}
} else if size > remoteSize {
// Truncate target
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(commonSize)
}
}
return
}
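// Minimal in-process wiring sketch (hypothetical; real deployments run the two ends
// in separate processes over a pipe or SSH stream, and error handling is omitted):
//
//	aR, aW := io.Pipe() // source -> target command stream
//	bR, bW := io.Pipe() // target -> source command stream
//	go func() { _ = Source(srcFile, srcSize, bR, aW, true, false, nil, nil) }()
//	err := Target(dstFile, dstSize, aR, bW, true, false, nil, nil)
//
// srcFile (an io.ReadSeeker) and dstFile (an spgz.SparseFile) are placeholders here.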
func (t *target) subtree(root *node, offset, size int64) (err error) {
_, err = t.cmdWriter.Write(root.sum)
if err != nil {
return
}
var cmd byte
err = binary.Read(t.cmdReader, binary.LittleEndian, &cmd)
if err != nil {
return fmt.Errorf("target: while reading block header at %d: %w", offset, err)
}
// log.Printf("offset: %d, size: %d, cmd: %d\n", offset, size, cmd)
if cmd == cmdNotEqual || cmd == cmdHole {
if root.size > 0 {
_, err = t.writer.Seek(offset, io.SeekStart)
if err != nil {
return
}
if cmd == cmdNotEqual {
_, err = io.CopyN(t.writer, t.cmdReader, size)
if err != nil {
err = fmt.Errorf("while copying block data at %d: %w", offset, err)
}
} else {
err = t.writer.WriteHole(size)
}
} else {
b := offset
order := byte(len(root.children))
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = t.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
}
return
}
| Flush | identifier_name |
sync.go | package diskrsync
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"log"
"math"
"github.com/dop251/spgz"
"golang.org/x/crypto/blake2b"
)
const (
hdrMagic = "BSNC0002"
)
const (
hashSize = 64
DefTargetBlockSize = 128 * 1024
)
const (
cmdHole byte = iota
cmdBlock
cmdEqual
cmdNotEqual
)
var (
ErrInvalidFormat = errors.New("invalid data format")
)
type ProgressListener interface {
Start(size int64)
Update(position int64)
}
type hashPool []hash.Hash
type workCtx struct {
buf []byte
n *node
hash hash.Hash
avail, hashReady chan struct{}
}
type node struct {
buf [hashSize]byte
parent *node
idx int
children []*node
size int
hash hash.Hash
sum []byte
}
type tree struct {
root *node
size int64
reader io.ReadSeeker
useBuffer bool
}
type base struct {
t tree
buf []byte
cmdReader io.Reader
cmdWriter io.Writer
syncProgressListener ProgressListener
}
type source struct {
base
reader io.ReadSeeker
}
type target struct {
base
writer *batchingWriter
}
// batchingWriter accumulates successive writes into a large buffer so that writes to the
// underlying spgz.SpgzFile cover whole compressed blocks, which avoids reading and
// unpacking them before writing.
type batchingWriter struct {
writer spgz.SparseFile
maxSize int
offset int64
holeSize int64
buf []byte
}
func (w *batchingWriter) Flush() error {
if w.holeSize > 0 {
err := w.writer.PunchHole(w.offset, w.holeSize)
if err == nil {
w.offset += w.holeSize
w.holeSize = 0
}
return err
}
if len(w.buf) == 0 {
return nil
}
n, err := w.writer.WriteAt(w.buf, w.offset)
if err != nil {
return err
}
w.buf = w.buf[:0]
w.offset += int64(n)
return nil
}
func (w *batchingWriter) prepareWrite() error {
if w.holeSize > 0 {
err := w.Flush()
if err != nil {
return err
}
}
if cap(w.buf) < w.maxSize {
buf := make([]byte, w.maxSize)
copy(buf, w.buf)
w.buf = buf[:len(w.buf)]
}
return nil
}
func (w *batchingWriter) Write(p []byte) (int, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
written := 0
for len(p) > 0 {
if len(p) >= w.maxSize && len(w.buf) == 0 {
residue := len(p) % w.maxSize
n, err := w.writer.WriteAt(p[:len(p)-residue], w.offset)
written += n
w.offset += int64(n)
if err != nil {
return written, err
}
p = p[n:]
} else {
n := copy(w.buf[len(w.buf):w.maxSize], p)
w.buf = w.buf[:len(w.buf)+n]
if len(w.buf) == w.maxSize {
n1, err := w.writer.WriteAt(w.buf, w.offset)
w.offset += int64(n1)
n2 := n1 - (len(w.buf) - n)
w.buf = w.buf[:0]
if n2 < 0 { | }
written += n2
if err != nil {
return written, err
}
} else {
written += n
}
p = p[n:]
}
}
return written, nil
}
func (w *batchingWriter) ReadFrom(src io.Reader) (int64, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
var read int64
for {
n, err := src.Read(w.buf[len(w.buf):w.maxSize])
read += int64(n)
w.buf = w.buf[:len(w.buf)+n]
if err == io.EOF {
return read, nil
}
if err != nil {
return read, err
}
if len(w.buf) == w.maxSize {
err = w.Flush()
if err != nil {
return read, err
}
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
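// Note that Seek flushes any buffered data or pending hole before repositioning, so
// interleaved Seek/Write/WriteHole sequences behave like writes to a plain file.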
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems[workIdx].n = nil
workItems[workIdx].hashReady <- struct{}{}
if rr < t.size {
return fmt.Errorf("read less data (%d) than expected (%d)", rr, t.size)
}
return nil
}
func readHeader(reader io.Reader) (size int64, err error) {
buf := make([]byte, len(hdrMagic)+8)
_, err = io.ReadFull(reader, buf)
if err != nil {
return
}
if string(buf[:len(hdrMagic)]) != hdrMagic {
err = ErrInvalidFormat
return
}
size = int64(binary.LittleEndian.Uint64(buf[len(hdrMagic):]))
return
}
func writeHeader(writer io.Writer, size int64) (err error) {
buf := make([]byte, len(hdrMagic)+8)
copy(buf, hdrMagic)
binary.LittleEndian.PutUint64(buf[len(hdrMagic):], uint64(size))
_, err = writer.Write(buf)
return
}
func Source(reader io.ReadSeeker, size int64, cmdReader io.Reader, cmdWriter io.Writer, useBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
err = writeHeader(cmdWriter, size)
if err != nil {
return
}
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return fmt.Errorf("could not read header: %w", err)
}
var commonSize int64
if remoteSize < size {
commonSize = remoteSize
} else {
commonSize = size
}
if commonSize > 0 {
s := source{
base: base{
t: tree{
reader: reader,
size: commonSize,
useBuffer: useBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
reader: reader,
}
err = s.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
s.syncProgressListener = syncPl
syncPl.Start(size)
}
err = s.subtree(s.t.root, 0, commonSize)
if err != nil {
return
}
} else {
if syncPl != nil {
syncPl.Start(size)
}
}
if size > commonSize {
// Write the tail
_, err = reader.Seek(commonSize, io.SeekStart)
if err != nil {
return
}
holeStart := int64(-1)
curPos := commonSize
buf := make([]byte, DefTargetBlockSize)
bw := bufio.NewWriterSize(cmdWriter, DefTargetBlockSize*2)
for {
var r int
var stop bool
r, err = io.ReadFull(reader, buf)
if err != nil {
if err == io.EOF {
break
}
if err != io.ErrUnexpectedEOF {
return fmt.Errorf("source, reading tail: %w", err)
}
buf = buf[:r]
stop = true
err = nil
}
if spgz.IsBlockZero(buf) {
if holeStart == -1 {
holeStart = curPos
}
} else {
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
holeStart = -1
}
err = bw.WriteByte(cmdBlock)
if err != nil {
return
}
_, err = bw.Write(buf)
if err != nil {
return
}
}
if err != nil {
return
}
curPos += int64(r)
if syncPl != nil {
syncPl.Update(curPos)
}
if stop {
break
}
}
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
}
err = bw.Flush()
}
return
}
func (s *source) subtree(root *node, offset, size int64) (err error) {
remoteHash := make([]byte, hashSize)
_, err = io.ReadFull(s.cmdReader, remoteHash)
if err != nil {
return fmt.Errorf("source/subtree, reading hash: %w", err)
}
if bytes.Equal(root.sum, remoteHash) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdEqual)
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
return
}
if root.size > 0 {
// log.Printf("Blocks at %d don't match\n", offset)
if int64(root.size) != size {
panic("Leaf node size mismatch")
}
_, err = s.reader.Seek(offset, io.SeekStart)
if err != nil {
return
}
buf := s.buffer(size)
_, err = io.ReadFull(s.reader, buf)
if err != nil {
return fmt.Errorf("source read failed at %d: %w", offset, err)
}
if spgz.IsBlockZero(buf) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdHole)
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
_, err = s.cmdWriter.Write(buf)
}
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
b := offset
order := byte(len(root.children))
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = s.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
return
}
type TargetFile interface {
io.ReadWriteSeeker
io.WriterAt
io.Closer
spgz.Truncatable
}
// FixingSpgzFileWrapper conceals read errors caused by compressed data corruption by re-writing the corrupt
// blocks with zeros. Such errors are usually caused by abrupt termination of the writing process.
// This wrapper is used as the sync target so the corrupt blocks will be updated during the sync process.
type FixingSpgzFileWrapper struct {
*spgz.SpgzFile
}
func (rw *FixingSpgzFileWrapper) checkErr(err error) error {
var ce *spgz.ErrCorruptCompressedBlock
if errors.As(err, &ce) {
if ce.Size() == 0 {
return rw.SpgzFile.Truncate(ce.Offset())
}
buf := make([]byte, ce.Size())
_, err = rw.SpgzFile.WriteAt(buf, ce.Offset())
}
return err
}
func (rw *FixingSpgzFileWrapper) Read(p []byte) (n int, err error) {
for n == 0 && err == nil { // avoid returning (0, nil) after a fix
n, err = rw.SpgzFile.Read(p)
if err != nil {
err = rw.checkErr(err)
}
}
return
}
func (rw *FixingSpgzFileWrapper) Seek(offset int64, whence int) (int64, error) {
o, err := rw.SpgzFile.Seek(offset, whence)
if err != nil {
err = rw.checkErr(err)
if err == nil {
o, err = rw.SpgzFile.Seek(offset, whence)
}
}
return o, err
}
func Target(writer spgz.SparseFile, size int64, cmdReader io.Reader, cmdWriter io.Writer, useReadBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
ch := make(chan error)
go func() {
ch <- writeHeader(cmdWriter, size)
}()
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return
}
err = <-ch
if err != nil {
return
}
commonSize := size
if remoteSize < commonSize {
commonSize = remoteSize
}
if commonSize > 0 {
t := target{
base: base{
t: tree{
reader: writer,
size: commonSize,
useBuffer: useReadBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
writer: &batchingWriter{writer: writer, maxSize: DefTargetBlockSize * 16},
}
err = t.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
t.syncProgressListener = syncPl
syncPl.Start(remoteSize)
}
err = t.subtree(t.t.root, 0, commonSize)
if err != nil {
return
}
err = t.writer.Flush()
if err != nil {
return
}
if syncPl != nil {
syncPl.Update(commonSize)
}
} else {
if syncPl != nil {
syncPl.Start(remoteSize)
}
}
if size < remoteSize {
// Read the tail
pos := commonSize
_, err = writer.Seek(pos, io.SeekStart)
if err != nil {
return
}
hole := false
rd := bufio.NewReaderSize(cmdReader, DefTargetBlockSize*2)
for {
var cmd byte
cmd, err = rd.ReadByte()
if err != nil {
if err == io.EOF {
err = nil
break
}
return fmt.Errorf("target: while reading tail block header: %w", err)
}
if cmd == cmdBlock {
var n int64
n, err = io.CopyN(writer, rd, DefTargetBlockSize)
pos += n
hole = false
if err != nil {
if err == io.EOF {
err = nil
if syncPl != nil {
syncPl.Update(pos)
}
break
} else {
return fmt.Errorf("target: while copying block: %w", err)
}
}
} else {
if cmd == cmdHole {
var holeSize int64
err = binary.Read(rd, binary.LittleEndian, &holeSize)
if err != nil {
return fmt.Errorf("target: while reading hole size: %w", err)
}
_, err = writer.Seek(holeSize, io.SeekCurrent)
if err != nil {
return
}
hole = true
pos += holeSize
} else {
return fmt.Errorf("unexpected cmd: %d", cmd)
}
}
if syncPl != nil {
syncPl.Update(pos)
}
}
if hole {
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(remoteSize)
}
}
} else if size > remoteSize {
// Truncate target
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(commonSize)
}
}
return
}
func (t *target) subtree(root *node, offset, size int64) (err error) {
_, err = t.cmdWriter.Write(root.sum)
if err != nil {
return
}
var cmd byte
err = binary.Read(t.cmdReader, binary.LittleEndian, &cmd)
if err != nil {
return fmt.Errorf("target: while reading block header at %d: %w", offset, err)
}
// log.Printf("offset: %d, size: %d, cmd: %d\n", offset, size, cmd)
if cmd == cmdNotEqual || cmd == cmdHole {
if root.size > 0 {
_, err = t.writer.Seek(offset, io.SeekStart)
if err != nil {
return
}
if cmd == cmdNotEqual {
_, err = io.CopyN(t.writer, t.cmdReader, size)
if err != nil {
err = fmt.Errorf("while copying block data at %d: %w", offset, err)
}
} else {
err = t.writer.WriteHole(size)
}
} else {
b := offset
order := byte(len(root.children))
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = t.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
}
return
} | n2 = 0 | random_line_split |
sync.go | package diskrsync
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"log"
"math"
"github.com/dop251/spgz"
"golang.org/x/crypto/blake2b"
)
const (
hdrMagic = "BSNC0002"
)
const (
hashSize = 64
DefTargetBlockSize = 128 * 1024
)
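// hashSize matches the 64-byte output of the BLAKE2b-512 hash used throughout.
// DefTargetBlockSize is only a starting point: calc() keeps doubling the effective
// target block size while the file would otherwise need more than 2^20 leaf blocks.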
const (
cmdHole byte = iota
cmdBlock
cmdEqual
cmdNotEqual
)
var (
ErrInvalidFormat = errors.New("invalid data format")
)
type ProgressListener interface {
Start(size int64)
Update(position int64)
}
type hashPool []hash.Hash
type workCtx struct {
buf []byte
n *node
hash hash.Hash
avail, hashReady chan struct{}
}
type node struct {
buf [hashSize]byte
parent *node
idx int
children []*node
size int
hash hash.Hash
sum []byte
}
type tree struct {
root *node
size int64
reader io.ReadSeeker
useBuffer bool
}
type base struct {
t tree
buf []byte
cmdReader io.Reader
cmdWriter io.Writer
syncProgressListener ProgressListener
}
type source struct {
base
reader io.ReadSeeker
}
type target struct {
base
writer *batchingWriter
}
// Accumulates successive writes into a large buffer so that writes into the underlying spgz.SpgzFile
// cover compressed blocks completely and the blocks do not have to be read and unpacked before being overwritten.
type batchingWriter struct {
writer spgz.SparseFile
maxSize int
offset int64
holeSize int64
buf []byte
}
func (w *batchingWriter) Flush() error {
if w.holeSize > 0 {
err := w.writer.PunchHole(w.offset, w.holeSize)
if err == nil {
w.offset += w.holeSize
w.holeSize = 0
}
return err
}
if len(w.buf) == 0 {
return nil
}
n, err := w.writer.WriteAt(w.buf, w.offset)
if err != nil {
return err
}
w.buf = w.buf[:0]
w.offset += int64(n)
return nil
}
func (w *batchingWriter) prepareWrite() error {
if w.holeSize > 0 {
err := w.Flush()
if err != nil {
return err
}
}
if cap(w.buf) < w.maxSize {
buf := make([]byte, w.maxSize)
copy(buf, w.buf)
w.buf = buf[:len(w.buf)]
}
return nil
}
func (w *batchingWriter) Write(p []byte) (int, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
written := 0
for len(p) > 0 {
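// Fast path: with nothing buffered, write whole multiples of maxSize directly
// and leave only the remainder for buffering.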
if len(p) >= w.maxSize && len(w.buf) == 0 {
residue := len(p) % w.maxSize
n, err := w.writer.WriteAt(p[:len(p)-residue], w.offset)
written += n
w.offset += int64(n)
if err != nil {
return written, err
}
p = p[n:]
} else {
n := copy(w.buf[len(w.buf):w.maxSize], p)
w.buf = w.buf[:len(w.buf)+n]
if len(w.buf) == w.maxSize {
n1, err := w.writer.WriteAt(w.buf, w.offset)
w.offset += int64(n1)
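// n1 counts the whole flushed buffer; subtract the previously buffered bytes
// so written only reflects bytes consumed from p.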
n2 := n1 - (len(w.buf) - n)
w.buf = w.buf[:0]
if n2 < 0 {
n2 = 0
}
written += n2
if err != nil {
return written, err
}
} else {
written += n
}
p = p[n:]
}
}
return written, nil
}
func (w *batchingWriter) ReadFrom(src io.Reader) (int64, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
var read int64
for {
n, err := src.Read(w.buf[len(w.buf):w.maxSize])
read += int64(n)
w.buf = w.buf[:len(w.buf)+n]
if err == io.EOF {
return read, nil
}
if err != nil {
return read, err
}
if len(w.buf) == w.maxSize {
err = w.Flush()
if err != nil {
return read, err
}
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) |
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
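// Grow the branching factor (order) while the leaf size of an 8-level tree
// keeps getting closer to targetBlockSize; stop when it falls below half of
// it or the difference starts growing again.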
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
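// Collector goroutine: consumes finished leaf hashes in submission order,
// propagates them up the tree via childReady, then releases the work slot.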
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems[workIdx].n = nil
workItems[workIdx].hashReady <- struct{}{}
if rr < t.size {
return fmt.Errorf("read less data (%d) than expected (%d)", rr, t.size)
}
return nil
}
func readHeader(reader io.Reader) (size int64, err error) {
buf := make([]byte, len(hdrMagic)+8)
_, err = io.ReadFull(reader, buf)
if err != nil {
return
}
if string(buf[:len(hdrMagic)]) != hdrMagic {
err = ErrInvalidFormat
return
}
size = int64(binary.LittleEndian.Uint64(buf[len(hdrMagic):]))
return
}
func writeHeader(writer io.Writer, size int64) (err error) {
buf := make([]byte, len(hdrMagic)+8)
copy(buf, hdrMagic)
binary.LittleEndian.PutUint64(buf[len(hdrMagic):], uint64(size))
_, err = writer.Write(buf)
return
}
func Source(reader io.ReadSeeker, size int64, cmdReader io.Reader, cmdWriter io.Writer, useBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
err = writeHeader(cmdWriter, size)
if err != nil {
return
}
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return fmt.Errorf("could not read header: %w", err)
}
var commonSize int64
if remoteSize < size {
commonSize = remoteSize
} else {
commonSize = size
}
if commonSize > 0 {
s := source{
base: base{
t: tree{
reader: reader,
size: commonSize,
useBuffer: useBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
reader: reader,
}
err = s.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
s.syncProgressListener = syncPl
syncPl.Start(size)
}
err = s.subtree(s.t.root, 0, commonSize)
if err != nil {
return
}
} else {
if syncPl != nil {
syncPl.Start(size)
}
}
if size > commonSize {
// Write the tail
_, err = reader.Seek(commonSize, io.SeekStart)
if err != nil {
return
}
holeStart := int64(-1)
curPos := commonSize
buf := make([]byte, DefTargetBlockSize)
bw := bufio.NewWriterSize(cmdWriter, DefTargetBlockSize*2)
for {
var r int
var stop bool
r, err = io.ReadFull(reader, buf)
if err != nil {
if err == io.EOF {
break
}
if err != io.ErrUnexpectedEOF {
return fmt.Errorf("source, reading tail: %w", err)
}
buf = buf[:r]
stop = true
err = nil
}
if spgz.IsBlockZero(buf) {
if holeStart == -1 {
holeStart = curPos
}
} else {
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
holeStart = -1
}
err = bw.WriteByte(cmdBlock)
if err != nil {
return
}
_, err = bw.Write(buf)
if err != nil {
return
}
}
if err != nil {
return
}
curPos += int64(r)
if syncPl != nil {
syncPl.Update(curPos)
}
if stop {
break
}
}
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
}
err = bw.Flush()
}
return
}
func (s *source) subtree(root *node, offset, size int64) (err error) {
remoteHash := make([]byte, hashSize)
_, err = io.ReadFull(s.cmdReader, remoteHash)
if err != nil {
return fmt.Errorf("source/subtree, reading hash: %w", err)
}
if bytes.Equal(root.sum, remoteHash) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdEqual)
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
return
}
if root.size > 0 {
// log.Printf("Blocks at %d don't match\n", offset)
if int64(root.size) != size {
panic("Leaf node size mismatch")
}
_, err = s.reader.Seek(offset, io.SeekStart)
if err != nil {
return
}
buf := s.buffer(size)
_, err = io.ReadFull(s.reader, buf)
if err != nil {
return fmt.Errorf("source read failed at %d: %w", offset, err)
}
if spgz.IsBlockZero(buf) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdHole)
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
_, err = s.cmdWriter.Write(buf)
}
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
b := offset
order := byte(len(root.children))
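// Split [offset, offset+size) into order nearly equal spans; deriving each
// boundary from the absolute offset avoids accumulating rounding error.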
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = s.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
return
}
type TargetFile interface {
io.ReadWriteSeeker
io.WriterAt
io.Closer
spgz.Truncatable
}
// FixingSpgzFileWrapper conceals read errors caused by compressed data corruption by re-writing the corrupt
// blocks with zeros. Such errors are usually caused by abrupt termination of the writing process.
// This wrapper is used as the sync target so the corrupt blocks will be updated during the sync process.
type FixingSpgzFileWrapper struct {
*spgz.SpgzFile
}
func (rw *FixingSpgzFileWrapper) checkErr(err error) error {
var ce *spgz.ErrCorruptCompressedBlock
if errors.As(err, &ce) {
if ce.Size() == 0 {
return rw.SpgzFile.Truncate(ce.Offset())
}
buf := make([]byte, ce.Size())
_, err = rw.SpgzFile.WriteAt(buf, ce.Offset())
}
return err
}
func (rw *FixingSpgzFileWrapper) Read(p []byte) (n int, err error) {
for n == 0 && err == nil { // avoid returning (0, nil) after a fix
n, err = rw.SpgzFile.Read(p)
if err != nil {
err = rw.checkErr(err)
}
}
return
}
func (rw *FixingSpgzFileWrapper) Seek(offset int64, whence int) (int64, error) {
o, err := rw.SpgzFile.Seek(offset, whence)
if err != nil {
err = rw.checkErr(err)
if err == nil {
o, err = rw.SpgzFile.Seek(offset, whence)
}
}
return o, err
}
func Target(writer spgz.SparseFile, size int64, cmdReader io.Reader, cmdWriter io.Writer, useReadBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
ch := make(chan error)
go func() {
ch <- writeHeader(cmdWriter, size)
}()
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return
}
err = <-ch
if err != nil {
return
}
commonSize := size
if remoteSize < commonSize {
commonSize = remoteSize
}
if commonSize > 0 {
t := target{
base: base{
t: tree{
reader: writer,
size: commonSize,
useBuffer: useReadBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
writer: &batchingWriter{writer: writer, maxSize: DefTargetBlockSize * 16},
}
err = t.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
t.syncProgressListener = syncPl
syncPl.Start(remoteSize)
}
err = t.subtree(t.t.root, 0, commonSize)
if err != nil {
return
}
err = t.writer.Flush()
if err != nil {
return
}
if syncPl != nil {
syncPl.Update(commonSize)
}
} else {
if syncPl != nil {
syncPl.Start(remoteSize)
}
}
if size < remoteSize {
// Read the tail
pos := commonSize
_, err = writer.Seek(pos, io.SeekStart)
if err != nil {
return
}
hole := false
rd := bufio.NewReaderSize(cmdReader, DefTargetBlockSize*2)
for {
var cmd byte
cmd, err = rd.ReadByte()
if err != nil {
if err == io.EOF {
err = nil
break
}
return fmt.Errorf("target: while reading tail block header: %w", err)
}
if cmd == cmdBlock {
var n int64
n, err = io.CopyN(writer, rd, DefTargetBlockSize)
pos += n
hole = false
if err != nil {
if err == io.EOF {
err = nil
if syncPl != nil {
syncPl.Update(pos)
}
break
} else {
return fmt.Errorf("target: while copying block: %w", err)
}
}
} else {
if cmd == cmdHole {
var holeSize int64
err = binary.Read(rd, binary.LittleEndian, &holeSize)
if err != nil {
return fmt.Errorf("target: while reading hole size: %w", err)
}
_, err = writer.Seek(holeSize, io.SeekCurrent)
if err != nil {
return
}
hole = true
pos += holeSize
} else {
return fmt.Errorf("unexpected cmd: %d", cmd)
}
}
if syncPl != nil {
syncPl.Update(pos)
}
}
if hole {
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(remoteSize)
}
}
} else if size > remoteSize {
// Truncate target
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(commonSize)
}
}
return
}
func (t *target) subtree(root *node, offset, size int64) (err error) {
_, err = t.cmdWriter.Write(root.sum)
if err != nil {
return
}
var cmd byte
err = binary.Read(t.cmdReader, binary.LittleEndian, &cmd)
if err != nil {
return fmt.Errorf("target: while reading block header at %d: %w", offset, err)
}
// log.Printf("offset: %d, size: %d, cmd: %d\n", offset, size, cmd)
if cmd == cmdNotEqual || cmd == cmdHole {
if root.size > 0 {
_, err = t.writer.Seek(offset, io.SeekStart)
if err != nil {
return
}
if cmd == cmdNotEqual {
_, err = io.CopyN(t.writer, t.cmdReader, size)
if err != nil {
err = fmt.Errorf("while copying block data at %d: %w", offset, err)
}
} else {
err = t.writer.WriteHole(size)
}
} else {
b := offset
order := byte(len(root.children))
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = t.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
}
return
}
| {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
} | identifier_body |
sync.go | package diskrsync
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"log"
"math"
"github.com/dop251/spgz"
"golang.org/x/crypto/blake2b"
)
const (
hdrMagic = "BSNC0002"
)
const (
hashSize = 64
DefTargetBlockSize = 128 * 1024
)
const (
cmdHole byte = iota
cmdBlock
cmdEqual
cmdNotEqual
)
var (
ErrInvalidFormat = errors.New("invalid data format")
)
type ProgressListener interface {
Start(size int64)
Update(position int64)
}
type hashPool []hash.Hash
type workCtx struct {
buf []byte
n *node
hash hash.Hash
avail, hashReady chan struct{}
}
type node struct {
buf [hashSize]byte
parent *node
idx int
children []*node
size int
hash hash.Hash
sum []byte
}
type tree struct {
root *node
size int64
reader io.ReadSeeker
useBuffer bool
}
type base struct {
t tree
buf []byte
cmdReader io.Reader
cmdWriter io.Writer
syncProgressListener ProgressListener
}
type source struct {
base
reader io.ReadSeeker
}
type target struct {
base
writer *batchingWriter
}
// Accumulates successive writes into a large buffer so that writes into the underlying spgz.SpgzFile
// cover compressed blocks completely and the blocks do not have to be read and unpacked before being overwritten.
type batchingWriter struct {
writer spgz.SparseFile
maxSize int
offset int64
holeSize int64
buf []byte
}
func (w *batchingWriter) Flush() error {
if w.holeSize > 0 {
err := w.writer.PunchHole(w.offset, w.holeSize)
if err == nil {
w.offset += w.holeSize
w.holeSize = 0
}
return err
}
if len(w.buf) == 0 {
return nil
}
n, err := w.writer.WriteAt(w.buf, w.offset)
if err != nil {
return err
}
w.buf = w.buf[:0]
w.offset += int64(n)
return nil
}
func (w *batchingWriter) prepareWrite() error {
if w.holeSize > 0 {
err := w.Flush()
if err != nil {
return err
}
}
if cap(w.buf) < w.maxSize {
buf := make([]byte, w.maxSize)
copy(buf, w.buf)
w.buf = buf[:len(w.buf)]
}
return nil
}
func (w *batchingWriter) Write(p []byte) (int, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
written := 0
for len(p) > 0 {
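// Fast path: with nothing buffered, write whole multiples of maxSize directly
// and leave only the remainder for buffering.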
if len(p) >= w.maxSize && len(w.buf) == 0 {
residue := len(p) % w.maxSize
n, err := w.writer.WriteAt(p[:len(p)-residue], w.offset)
written += n
w.offset += int64(n)
if err != nil {
return written, err
}
p = p[n:]
} else {
n := copy(w.buf[len(w.buf):w.maxSize], p)
w.buf = w.buf[:len(w.buf)+n]
if len(w.buf) == w.maxSize {
n1, err := w.writer.WriteAt(w.buf, w.offset)
w.offset += int64(n1)
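// n1 counts the whole flushed buffer; subtract the previously buffered bytes
// so written only reflects bytes consumed from p.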
n2 := n1 - (len(w.buf) - n)
w.buf = w.buf[:0]
if n2 < 0 {
n2 = 0
}
written += n2
if err != nil {
return written, err
}
} else {
written += n
}
p = p[n:]
}
}
return written, nil
}
func (w *batchingWriter) ReadFrom(src io.Reader) (int64, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
var read int64
for {
n, err := src.Read(w.buf[len(w.buf):w.maxSize])
read += int64(n)
w.buf = w.buf[:len(w.buf)+n]
if err == io.EOF {
return read, nil
}
if err != nil {
return read, err
}
if len(w.buf) == w.maxSize {
err = w.Flush()
if err != nil |
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
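// Grow the branching factor (order) while the leaf size of an 8-level tree
// keeps getting closer to targetBlockSize; stop when it falls below half of
// it or the difference starts growing again.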
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
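// Collector goroutine: consumes finished leaf hashes in submission order,
// propagates them up the tree via childReady, then releases the work slot.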
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems[workIdx].n = nil
workItems[workIdx].hashReady <- struct{}{}
if rr < t.size {
return fmt.Errorf("read less data (%d) than expected (%d)", rr, t.size)
}
return nil
}
func readHeader(reader io.Reader) (size int64, err error) {
buf := make([]byte, len(hdrMagic)+8)
_, err = io.ReadFull(reader, buf)
if err != nil {
return
}
if string(buf[:len(hdrMagic)]) != hdrMagic {
err = ErrInvalidFormat
return
}
size = int64(binary.LittleEndian.Uint64(buf[len(hdrMagic):]))
return
}
func writeHeader(writer io.Writer, size int64) (err error) {
buf := make([]byte, len(hdrMagic)+8)
copy(buf, hdrMagic)
binary.LittleEndian.PutUint64(buf[len(hdrMagic):], uint64(size))
_, err = writer.Write(buf)
return
}
func Source(reader io.ReadSeeker, size int64, cmdReader io.Reader, cmdWriter io.Writer, useBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
err = writeHeader(cmdWriter, size)
if err != nil {
return
}
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return fmt.Errorf("could not read header: %w", err)
}
var commonSize int64
if remoteSize < size {
commonSize = remoteSize
} else {
commonSize = size
}
if commonSize > 0 {
s := source{
base: base{
t: tree{
reader: reader,
size: commonSize,
useBuffer: useBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
reader: reader,
}
err = s.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
s.syncProgressListener = syncPl
syncPl.Start(size)
}
err = s.subtree(s.t.root, 0, commonSize)
if err != nil {
return
}
} else {
if syncPl != nil {
syncPl.Start(size)
}
}
if size > commonSize {
// Write the tail
_, err = reader.Seek(commonSize, io.SeekStart)
if err != nil {
return
}
holeStart := int64(-1)
curPos := commonSize
buf := make([]byte, DefTargetBlockSize)
bw := bufio.NewWriterSize(cmdWriter, DefTargetBlockSize*2)
for {
var r int
var stop bool
r, err = io.ReadFull(reader, buf)
if err != nil {
if err == io.EOF {
break
}
if err != io.ErrUnexpectedEOF {
return fmt.Errorf("source, reading tail: %w", err)
}
buf = buf[:r]
stop = true
err = nil
}
if spgz.IsBlockZero(buf) {
if holeStart == -1 {
holeStart = curPos
}
} else {
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
holeStart = -1
}
err = bw.WriteByte(cmdBlock)
if err != nil {
return
}
_, err = bw.Write(buf)
if err != nil {
return
}
}
if err != nil {
return
}
curPos += int64(r)
if syncPl != nil {
syncPl.Update(curPos)
}
if stop {
break
}
}
if holeStart != -1 {
err = bw.WriteByte(cmdHole)
if err != nil {
return
}
err = binary.Write(bw, binary.LittleEndian, curPos-holeStart)
if err != nil {
return
}
}
err = bw.Flush()
}
return
}
func (s *source) subtree(root *node, offset, size int64) (err error) {
remoteHash := make([]byte, hashSize)
_, err = io.ReadFull(s.cmdReader, remoteHash)
if err != nil {
return fmt.Errorf("source/subtree, reading hash: %w", err)
}
if bytes.Equal(root.sum, remoteHash) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdEqual)
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
return
}
if root.size > 0 {
// log.Printf("Blocks at %d don't match\n", offset)
if int64(root.size) != size {
panic("Leaf node size mismatch")
}
_, err = s.reader.Seek(offset, io.SeekStart)
if err != nil {
return
}
buf := s.buffer(size)
_, err = io.ReadFull(s.reader, buf)
if err != nil {
return fmt.Errorf("source read failed at %d: %w", offset, err)
}
if spgz.IsBlockZero(buf) {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdHole)
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
_, err = s.cmdWriter.Write(buf)
}
if s.syncProgressListener != nil {
s.syncProgressListener.Update(offset + size)
}
} else {
err = binary.Write(s.cmdWriter, binary.LittleEndian, cmdNotEqual)
if err != nil {
return
}
b := offset
order := byte(len(root.children))
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = s.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
return
}
type TargetFile interface {
io.ReadWriteSeeker
io.WriterAt
io.Closer
spgz.Truncatable
}
// FixingSpgzFileWrapper conceals read errors caused by compressed data corruption by re-writing the corrupt
// blocks with zeros. Such errors are usually caused by abrupt termination of the writing process.
// This wrapper is used as the sync target so the corrupt blocks will be updated during the sync process.
type FixingSpgzFileWrapper struct {
*spgz.SpgzFile
}
func (rw *FixingSpgzFileWrapper) checkErr(err error) error {
var ce *spgz.ErrCorruptCompressedBlock
if errors.As(err, &ce) {
if ce.Size() == 0 {
return rw.SpgzFile.Truncate(ce.Offset())
}
buf := make([]byte, ce.Size())
_, err = rw.SpgzFile.WriteAt(buf, ce.Offset())
}
return err
}
func (rw *FixingSpgzFileWrapper) Read(p []byte) (n int, err error) {
for n == 0 && err == nil { // avoid returning (0, nil) after a fix
n, err = rw.SpgzFile.Read(p)
if err != nil {
err = rw.checkErr(err)
}
}
return
}
func (rw *FixingSpgzFileWrapper) Seek(offset int64, whence int) (int64, error) {
o, err := rw.SpgzFile.Seek(offset, whence)
if err != nil {
err = rw.checkErr(err)
if err == nil {
o, err = rw.SpgzFile.Seek(offset, whence)
}
}
return o, err
}
func Target(writer spgz.SparseFile, size int64, cmdReader io.Reader, cmdWriter io.Writer, useReadBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
ch := make(chan error)
go func() {
ch <- writeHeader(cmdWriter, size)
}()
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil {
return
}
err = <-ch
if err != nil {
return
}
commonSize := size
if remoteSize < commonSize {
commonSize = remoteSize
}
if commonSize > 0 {
t := target{
base: base{
t: tree{
reader: writer,
size: commonSize,
useBuffer: useReadBuffer,
},
cmdReader: cmdReader,
cmdWriter: cmdWriter,
},
writer: &batchingWriter{writer: writer, maxSize: DefTargetBlockSize * 16},
}
err = t.t.calc(verbose, calcPl)
if err != nil {
return
}
if syncPl != nil {
t.syncProgressListener = syncPl
syncPl.Start(remoteSize)
}
err = t.subtree(t.t.root, 0, commonSize)
if err != nil {
return
}
err = t.writer.Flush()
if err != nil {
return
}
if syncPl != nil {
syncPl.Update(commonSize)
}
} else {
if syncPl != nil {
syncPl.Start(remoteSize)
}
}
if size < remoteSize {
// Read the tail
pos := commonSize
_, err = writer.Seek(pos, io.SeekStart)
if err != nil {
return
}
hole := false
rd := bufio.NewReaderSize(cmdReader, DefTargetBlockSize*2)
for {
var cmd byte
cmd, err = rd.ReadByte()
if err != nil {
if err == io.EOF {
err = nil
break
}
return fmt.Errorf("target: while reading tail block header: %w", err)
}
if cmd == cmdBlock {
var n int64
n, err = io.CopyN(writer, rd, DefTargetBlockSize)
pos += n
hole = false
if err != nil {
if err == io.EOF {
err = nil
if syncPl != nil {
syncPl.Update(pos)
}
break
} else {
return fmt.Errorf("target: while copying block: %w", err)
}
}
} else {
if cmd == cmdHole {
var holeSize int64
err = binary.Read(rd, binary.LittleEndian, &holeSize)
if err != nil {
return fmt.Errorf("target: while reading hole size: %w", err)
}
_, err = writer.Seek(holeSize, io.SeekCurrent)
if err != nil {
return
}
hole = true
pos += holeSize
} else {
return fmt.Errorf("unexpected cmd: %d", cmd)
}
}
if syncPl != nil {
syncPl.Update(pos)
}
}
if hole {
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(remoteSize)
}
}
} else if size > remoteSize {
// Truncate target
if f, ok := writer.(spgz.Truncatable); ok {
err = f.Truncate(commonSize)
}
}
return
}
func (t *target) subtree(root *node, offset, size int64) (err error) {
_, err = t.cmdWriter.Write(root.sum)
if err != nil {
return
}
var cmd byte
err = binary.Read(t.cmdReader, binary.LittleEndian, &cmd)
if err != nil {
return fmt.Errorf("target: while reading block header at %d: %w", offset, err)
}
// log.Printf("offset: %d, size: %d, cmd: %d\n", offset, size, cmd)
if cmd == cmdNotEqual || cmd == cmdHole {
if root.size > 0 {
_, err = t.writer.Seek(offset, io.SeekStart)
if err != nil {
return
}
if cmd == cmdNotEqual {
_, err = io.CopyN(t.writer, t.cmdReader, size)
if err != nil {
err = fmt.Errorf("while copying block data at %d: %w", offset, err)
}
} else {
err = t.writer.WriteHole(size)
}
} else {
b := offset
order := byte(len(root.children))
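// Recurse into the children over nearly equal spans computed the same way the
// tree was built, so offsets line up with the source side.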
for i := byte(0); i < order; i++ {
l := offset + (size * int64(i+1) / int64(order)) - b
err = t.subtree(root.children[i], b, l)
if err != nil {
return
}
b += l
}
}
}
return
}
| {
return read, err
} | conditional_block |
event.py | """
This module handles user input.
To handle user input you will likely want to use the L{event.get} function
or create a subclass of L{event.App}.
- L{event.get} iterates over recent events.
- L{event.App} passes events to the overridable methods: ev_* and key_*.
But there are other options such as L{event.keyWait} and L{event.isWindowClosed}.
A few event attributes are actually string constants.
Here's a reference for those:
- L{Event.type}
'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION'.
- L{MouseButtonEvent.button} (found in L{MouseDown} and L{MouseUp} events)
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
- L{KeyEvent.key} (found in L{KeyDown} and L{KeyUp} events)
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
"""
import time as _time
from tcod import ffi as _ffi
from tcod import lib as _lib
import tdl as _tdl
from . import style as _style
_eventQueue = []
_pushedEvents = []
_mousel = 0
_mousem = 0
_mouser = 0
# this interprets the constants from libtcod and makes a key -> keyname dictionary
def _parseKeyNames(lib):
"""
returns a dictionary mapping keycodes to their human readable key names
this parses the TCODK_* constants and makes code=name pairs
this is for the KeyEvent.key variable and enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs
return _keyNames
_keyNames = _parseKeyNames(_lib)
class Event(object):
"""Base Event class.
You can easily subclass this to make your own events. Be sure to set
the class attribute L{Event.type} for it to be passed to a custom L{App}
ev_* method."""
type = None
"""String constant representing the type of event.
The L{App} ev_* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION'.
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
|
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)"""
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But a single App instance can not be run multiple times simultaneously.
"""
if getattr(self, '_App__running', False):
raise _tdl.TDLError('An App can not be run multiple times simultaneously')
self.__running = True
while self.__running:
self.runOnce()
def run_once(self):
"""Pump events to this App instance and then return.
This works in the way described in L{App.run} except it immediately
returns after the first L{update} call.
Having multiple L{App} instances and selectively calling runOnce on
them is a decent way to create a state machine.
"""
if not hasattr(self, '_App__prevTime'):
self.__prevTime = _time.clock() # initiate __prevTime
for event in get():
if event.type: # exclude custom events with a blank type variable
# call the ev_* methods
method = 'ev_%s' % event.type # ev_TYPE
getattr(self, method)(event)
if event.type == 'KEYDOWN':
# call the key_* methods
method = 'key_%s' % event.key # key_KEYNAME
if hasattr(self, method): # silently exclude undefined methods
getattr(self, method)(event)
newTime = _time.clock()
self.update(newTime - self.__prevTime)
self.__prevTime = newTime
#_tdl.flush()
def _processEvents():
"""Flushes the event queue from libtcod into the global list _eventQueue"""
global _mousel, _mousem, _mouser, _eventsflushed, _pushedEvents
_eventsflushed = True
events = _pushedEvents # get events from event.push
_pushedEvents = [] # then clear the pushed events queue
mouse = _ffi.new('TCOD_mouse_t *')
libkey = _ffi.new('TCOD_key_t *')
while 1:
libevent = _lib.TCOD_sys_check_for_event(_lib.TCOD_EVENT_ANY, libkey, mouse)
if not libevent: # no more events from libtcod
break
#if mouse.dx or mouse.dy:
if libevent & _lib.TCOD_EVENT_MOUSE_MOVE:
events.append(MouseMotion((mouse.x, mouse.y),
(mouse.cx, mouse.cy),
(mouse.dx, mouse.dy),
(mouse.dcx, mouse.dcy)))
mousepos = ((mouse.x, mouse.y), (mouse.cx, mouse.cy))
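# Diff the previous and current button states to synthesize MouseDown/MouseUp
# events; `released` also covers a press and release within a single frame.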
for oldstate, newstate, released, button in \
zip((_mousel, _mousem, _mouser),
(mouse.lbutton, mouse.mbutton, mouse.rbutton),
(mouse.lbutton_pressed, mouse.mbutton_pressed,
mouse.rbutton_pressed),
(1, 2, 3)):
if released:
if not oldstate:
events.append(MouseDown(button, *mousepos))
events.append(MouseUp(button, *mousepos))
if newstate:
events.append(MouseDown(button, *mousepos))
elif newstate and not oldstate:
events.append(MouseDown(button, *mousepos))
if mouse.wheel_up:
events.append(MouseDown(4, *mousepos))
if mouse.wheel_down:
events.append(MouseDown(5, *mousepos))
_mousel = mouse.lbutton
_mousem = mouse.mbutton
_mouser = mouse.rbutton
if libkey.vk == _lib.TCODK_NONE:
break
if libkey.pressed:
keyevent = KeyDown
else:
keyevent = KeyUp
if libkey.vk == _lib.TCODK_TEXT:
# Hack 2017-03-22 HexDecimal
# Fix undefined libtcod behaviour which breaks 32-bit builds.
libkey.c = b'\x00'
libkey.shift = False
libkey.lalt = libkey.ralt = False
libkey.lctrl = libkey.rctrl = False
libkey.lmeta = libkey.rmeta = False
events.append(
keyevent(
libkey.vk,
libkey.c.decode('ascii', errors='ignore'),
_ffi.string(libkey.text).decode('utf-8'),
libkey.shift,
libkey.lalt,
libkey.ralt,
libkey.lctrl,
libkey.rctrl,
libkey.lmeta,
libkey.rmeta,
)
)
if _lib.TCOD_console_is_window_closed():
events.append(Quit())
_eventQueue.extend(events)
def get():
"""Flushes the event queue and returns the list of events.
This function returns L{Event} objects that can be identified by their
type attribute or their class.
@rtype: iterator
@return: Returns an iterable of objects derived from L{Event} or anything
put in a L{push} call. If the iterator is deleted or otherwise
interrupted before finishing the excess items are preserved for the
next call.
"""
_processEvents()
return _event_generator()
def _event_generator():
while _eventQueue:
# if there is an interruption the rest of the events stay untouched
# this means you can break out of an event.get loop without losing
# the leftover events
yield(_eventQueue.pop(0))
return  # PEP 479: end the generator explicitly instead of raising StopIteration
def wait(timeout=None, flush=True):
"""Wait for an event.
@type timeout: int or None
@param timeout: The time in seconds that this function will wait before
giving up and returning None.
With the default value of None, this will block forever.
@type flush: boolean
@param flush: If True a call to L{tdl.flush} will be made before listening
for events.
@rtype: L{Event} or None
@return: Returns an instance derived from L{Event}, or None if the function
has timed out.
Anything added via L{push} will also be returned.
@since: 1.4.0
"""
if timeout is not None:
timeout = timeout + _time.clock() # timeout at this time
while True:
if _eventQueue:
return _eventQueue.pop(0)
if flush:
# a full 'round' of events needs to be processed before flushing
_tdl.flush()
if timeout and _time.clock() >= timeout:
return None # return None on timeout
_time.sleep(0.001) # sleep 1ms
_processEvents()
def push(event):
"""Push an event into the event buffer.
@type event: L{Event}-like object
@param event: The event will be available on the next call to L{event.get}.
An event pushed in the middle of a L{get} will not show until
the next time L{get} is called, preventing push-related
infinite loops.
This object should at least have a 'type' attribute.
"""
_pushedEvents.append(event)
def key_wait():
"""Waits until the user presses a key.
Then returns a L{KeyDown} event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
@rtype: L{KeyDown}
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
return KeyDown(key='F4', char='', text='', shift=False, left_alt=True)
_time.sleep(.001)
def set_key_repeat(delay=500, interval=0):
"""Does nothing.
"""
pass
def is_window_closed():
"""Returns True if the exit button on the window has been clicked and
stays True afterwards.
@rtype: boolean
"""
return _lib.TCOD_console_is_window_closed()
__all__ = [_var for _var in locals().keys() if _var[0] != '_']
App.runOnce = _style.backport(App.run_once)
keyWait = _style.backport(key_wait)
setKeyRepeat = _style.backport(set_key_repeat)
isWindowClosed = _style.backport(is_window_closed)
| parameters.append('%s=%r' % (attr, value)) | conditional_block |
event.py | """
This module handles user input.
To handle user input you will likely want to use the L{event.get} function
or create a subclass of L{event.App}.
- L{event.get} iterates over recent events.
- L{event.App} passes events to the overridable methods: ev_* and key_*.
But there are other options such as L{event.keyWait} and L{event.isWindowClosed}.
A few event attributes are actually string constants.
Here's a reference for those:
- L{Event.type} |
'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION'.
- L{MouseButtonEvent.button} (found in L{MouseDown} and L{MouseUp} events)
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
- L{KeyEvent.key} (found in L{KeyDown} and L{KeyUp} events)
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
"""
import time as _time
from tcod import ffi as _ffi
from tcod import lib as _lib
import tdl as _tdl
from . import style as _style
_eventQueue = []
_pushedEvents = []
_mousel = 0
_mousem = 0
_mouser = 0
# this interprets the constants from libtcod and makes a key -> keyname dictionary
def _parseKeyNames(lib):
"""
returns a dictionary mapping keycodes to their human readable key names
this parses the TCODK_* constants and makes code=name pairs
this is for the KeyEvent.key variable and enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs
return _keyNames
_keyNames = _parseKeyNames(_lib)
class Event(object):
"""Base Event class.
You can easily subclass this to make your own events. Be sure to set
the class attribute L{Event.type} for it to be passed to a custom L{App}
ev_* method."""
type = None
"""String constant representing the type of event.
The L{App} ev_* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION'.
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
parameters.append('%s=%r' % (attr, value))
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)"""
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But a single App instance can not be run multiple times simultaneously.
"""
if getattr(self, '_App__running', False):
raise _tdl.TDLError('An App can not be run multiple times simultaneously')
self.__running = True
while self.__running:
self.runOnce()
def run_once(self):
"""Pump events to this App instance and then return.
This works in the way described in L{App.run} except it immediately
returns after the first L{update} call.
Having multiple L{App} instances and selectively calling runOnce on
them is a decent way to create a state machine.
"""
if not hasattr(self, '_App__prevTime'):
self.__prevTime = _time.clock() # initiate __prevTime
for event in get():
if event.type: # exclude custom events with a blank type variable
# call the ev_* methods
method = 'ev_%s' % event.type # ev_TYPE
getattr(self, method)(event)
if event.type == 'KEYDOWN':
# call the key_* methods
method = 'key_%s' % event.key # key_KEYNAME
if hasattr(self, method): # silently exclude undefined methods
getattr(self, method)(event)
newTime = _time.clock()
self.update(newTime - self.__prevTime)
self.__prevTime = newTime
#_tdl.flush()
def _processEvents():
"""Flushes the event queue from libtcod into the global list _eventQueue"""
global _mousel, _mousem, _mouser, _eventsflushed, _pushedEvents
_eventsflushed = True
events = _pushedEvents # get events from event.push
_pushedEvents = [] # then clear the pushed events queue
mouse = _ffi.new('TCOD_mouse_t *')
libkey = _ffi.new('TCOD_key_t *')
while 1:
libevent = _lib.TCOD_sys_check_for_event(_lib.TCOD_EVENT_ANY, libkey, mouse)
if not libevent: # no more events from libtcod
break
#if mouse.dx or mouse.dy:
if libevent & _lib.TCOD_EVENT_MOUSE_MOVE:
events.append(MouseMotion((mouse.x, mouse.y),
(mouse.cx, mouse.cy),
(mouse.dx, mouse.dy),
(mouse.dcx, mouse.dcy)))
mousepos = ((mouse.x, mouse.y), (mouse.cx, mouse.cy))
for oldstate, newstate, released, button in \
zip((_mousel, _mousem, _mouser),
(mouse.lbutton, mouse.mbutton, mouse.rbutton),
(mouse.lbutton_pressed, mouse.mbutton_pressed,
mouse.rbutton_pressed),
(1, 2, 3)):
if released:
if not oldstate:
events.append(MouseDown(button, *mousepos))
events.append(MouseUp(button, *mousepos))
if newstate:
events.append(MouseDown(button, *mousepos))
elif newstate and not oldstate:
events.append(MouseDown(button, *mousepos))
if mouse.wheel_up:
events.append(MouseDown(4, *mousepos))
if mouse.wheel_down:
events.append(MouseDown(5, *mousepos))
_mousel = mouse.lbutton
_mousem = mouse.mbutton
_mouser = mouse.rbutton
if libkey.vk == _lib.TCODK_NONE:
break
if libkey.pressed:
keyevent = KeyDown
else:
keyevent = KeyUp
if libkey.vk == _lib.TCODK_TEXT:
# Hack 2017-03-22 HexDecimal
# Fix undefined libtcod behaviour which breaks 32-bit builds.
libkey.c = b'\x00'
libkey.shift = False
libkey.lalt = libkey.ralt = False
libkey.lctrl = libkey.rctrl = False
libkey.lmeta = libkey.rmeta = False
events.append(
keyevent(
libkey.vk,
libkey.c.decode('ascii', errors='ignore'),
_ffi.string(libkey.text).decode('utf-8'),
libkey.shift,
libkey.lalt,
libkey.ralt,
libkey.lctrl,
libkey.rctrl,
libkey.lmeta,
libkey.rmeta,
)
)
if _lib.TCOD_console_is_window_closed():
events.append(Quit())
_eventQueue.extend(events)
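# Illustrative trace of the button handling above (an assumption about libtcod's
# semantics: the *button_pressed fields flag a release during this frame):
#   frame A: lbutton 0 -> 1, not released   => MouseDown('LEFT') queued
#   frame B: lbutton stays 1                => nothing new queued
#   frame C: lbutton 1 -> 0, released       => MouseUp('LEFT') queued
# A press and release inside a single frame queues MouseDown and MouseUp together.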
def get():
"""Flushes the event queue and returns the list of events.
This function returns L{Event} objects that can be identified by their
type attribute or their class.
@rtype: iterator
@return: Returns an iterable of objects derived from L{Event} or anything
put in a L{push} call. If the iterator is deleted or otherwise
             interrupted before finishing, the excess items are preserved for the
next call.
"""
_processEvents()
return _event_generator()
def _event_generator():
while _eventQueue:
# if there is an interruption the rest of the events stay untouched
        # this means you can break out of an event.get loop without losing
# the leftover events
yield(_eventQueue.pop(0))
    return
def wait(timeout=None, flush=True):
"""Wait for an event.
@type timeout: int or None
@param timeout: The time in seconds that this function will wait before
giving up and returning None.
With the default value of None, this will block forever.
@type flush: boolean
@param flush: If True a call to L{tdl.flush} will be made before listening
for events.
@rtype: L{Event} or None
@return: Returns an instance derived from L{Event}, or None if the function
has timed out.
Anything added via L{push} will also be returned.
@since: 1.4.0
"""
if timeout is not None:
timeout = timeout + _time.clock() # timeout at this time
while True:
if _eventQueue:
return _eventQueue.pop(0)
if flush:
            # a full 'round' of events needs to be processed before flushing
_tdl.flush()
if timeout and _time.clock() >= timeout:
return None # return None on timeout
_time.sleep(0.001) # sleep 1ms
_processEvents()
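# Illustrative usage sketch (not part of the module): wait() returns None on
# timeout, so callers should handle both outcomes.
#
#     ev = wait(timeout=5)          # block for up to ~5 seconds
#     if ev is None:
#         print('no input')
#     elif ev.type == 'KEYDOWN':
#         print('pressed', ev.keychar)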
def push(event):
"""Push an event into the event buffer.
@type event: L{Event}-like object
@param event: The event will be available on the next call to L{event.get}.
An event pushed in the middle of a L{get} will not show until
                  the next time L{get} is called, preventing push-related
infinite loops.
This object should at least have a 'type' attribute.
"""
_pushedEvents.append(event)
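# Illustrative sketch (hypothetical SpawnEvent class, not part of the module):
# any object with a 'type' attribute can be pushed; an App subclass must define
# a matching ev_* method (ev_SPAWN here) to receive it.
#
#     class SpawnEvent(Event):
#         type = 'SPAWN'
#
#     push(SpawnEvent())   # delivered on the next call to get()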
def key_wait():
"""Waits until the user presses a key.
Then returns a L{KeyDown} event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
@rtype: L{KeyDown}
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
                return KeyDown('F4', '', '', False, True)
_time.sleep(.001)
def set_key_repeat(delay=500, interval=0):
"""Does nothing.
"""
pass
def is_window_closed():
"""Returns True if the exit button on the window has been clicked and
stays True afterwards.
@rtype: boolean
"""
return _lib.TCOD_console_is_window_closed()
__all__ = [_var for _var in locals().keys() if _var[0] != '_']
App.runOnce = _style.backport(App.run_once)
keyWait = _style.backport(key_wait)
setKeyRepeat = _style.backport(set_key_repeat)
isWindowClosed = _style.backport(is_window_closed) | random_line_split |
|
event.py | """
This module handles user input.
To handle user input you will likely want to use the L{event.get} function
or create a subclass of L{event.App}.
- L{event.get} iterates over recent events.
- L{event.App} passes events to the overridable methods: ev_* and key_*.
But there are other options such as L{event.keyWait} and L{event.isWindowClosed}.
A few event attributes are actually string constants.
Here's a reference for those:
- L{Event.type}
'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
- L{MouseButtonEvent.button} (found in L{MouseDown} and L{MouseUp} events)
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
- L{KeyEvent.key} (found in L{KeyDown} and L{KeyUp} events)
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
"""
import time as _time
from tcod import ffi as _ffi
from tcod import lib as _lib
import tdl as _tdl
from . import style as _style
_eventQueue = []
_pushedEvents = []
_mousel = 0
_mousem = 0
_mouser = 0
# this interprets the constants from libtcod and makes a key -> keyname dictionary
def _parseKeyNames(lib):
"""
returns a dictionary mapping of human readable key names to their keycodes
this parses constants with the names of K_* and makes code=name pairs
this is for KeyEvent.key variable and that enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs
return _keyNames
_keyNames = _parseKeyNames(_lib)
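# For illustration (assuming libtcod defines TCODK_ESCAPE): after this call,
# _keyNames[_lib.TCODK_ESCAPE] == 'ESCAPE', which is the value KeyEvent.key exposes.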
class Event(object):
"""Base Event class.
You can easily subclass this to make your own events. Be sure to set
the class attribute L{Event.type} for it to be passed to a custom L{App}
ev_* method."""
type = None
"""String constant representing the type of event.
The L{App} ev_* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
parameters.append('%s=%r' % (attr, value))
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
|
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But a single App instance can not be run multiple times simultaneously.
"""
if getattr(self, '_App__running', False):
raise _tdl.TDLError('An App can not be run multiple times simultaneously')
self.__running = True
while self.__running:
self.runOnce()
def run_once(self):
"""Pump events to this App instance and then return.
This works in the way described in L{App.run} except it immediately
returns after the first L{update} call.
Having multiple L{App} instances and selectively calling runOnce on
them is a decent way to create a state machine.
"""
if not hasattr(self, '_App__prevTime'):
self.__prevTime = _time.clock() # initiate __prevTime
for event in get():
if event.type: # exclude custom events with a blank type variable
# call the ev_* methods
method = 'ev_%s' % event.type # ev_TYPE
getattr(self, method)(event)
if event.type == 'KEYDOWN':
# call the key_* methods
method = 'key_%s' % event.key # key_KEYNAME
if hasattr(self, method): # silently exclude undefined methods
getattr(self, method)(event)
newTime = _time.clock()
self.update(newTime - self.__prevTime)
self.__prevTime = newTime
#_tdl.flush()
def _processEvents():
"""Flushes the event queue from libtcod into the global list _eventQueue"""
global _mousel, _mousem, _mouser, _eventsflushed, _pushedEvents
_eventsflushed = True
events = _pushedEvents # get events from event.push
_pushedEvents = [] # then clear the pushed events queue
mouse = _ffi.new('TCOD_mouse_t *')
libkey = _ffi.new('TCOD_key_t *')
while 1:
libevent = _lib.TCOD_sys_check_for_event(_lib.TCOD_EVENT_ANY, libkey, mouse)
if not libevent: # no more events from libtcod
break
#if mouse.dx or mouse.dy:
if libevent & _lib.TCOD_EVENT_MOUSE_MOVE:
events.append(MouseMotion((mouse.x, mouse.y),
(mouse.cx, mouse.cy),
(mouse.dx, mouse.dy),
(mouse.dcx, mouse.dcy)))
mousepos = ((mouse.x, mouse.y), (mouse.cx, mouse.cy))
for oldstate, newstate, released, button in \
zip((_mousel, _mousem, _mouser),
(mouse.lbutton, mouse.mbutton, mouse.rbutton),
(mouse.lbutton_pressed, mouse.mbutton_pressed,
mouse.rbutton_pressed),
(1, 2, 3)):
if released:
if not oldstate:
events.append(MouseDown(button, *mousepos))
events.append(MouseUp(button, *mousepos))
if newstate:
events.append(MouseDown(button, *mousepos))
elif newstate and not oldstate:
events.append(MouseDown(button, *mousepos))
if mouse.wheel_up:
events.append(MouseDown(4, *mousepos))
if mouse.wheel_down:
events.append(MouseDown(5, *mousepos))
_mousel = mouse.lbutton
_mousem = mouse.mbutton
_mouser = mouse.rbutton
if libkey.vk == _lib.TCODK_NONE:
break
if libkey.pressed:
keyevent = KeyDown
else:
keyevent = KeyUp
if libkey.vk == _lib.TCODK_TEXT:
# Hack 2017-03-22 HexDecimal
# Fix undefined libtcod behaviour which breaks 32-bit builds.
libkey.c = b'\x00'
libkey.shift = False
libkey.lalt = libkey.ralt = False
libkey.lctrl = libkey.rctrl = False
libkey.lmeta = libkey.rmeta = False
events.append(
keyevent(
libkey.vk,
libkey.c.decode('ascii', errors='ignore'),
_ffi.string(libkey.text).decode('utf-8'),
libkey.shift,
libkey.lalt,
libkey.ralt,
libkey.lctrl,
libkey.rctrl,
libkey.lmeta,
libkey.rmeta,
)
)
if _lib.TCOD_console_is_window_closed():
events.append(Quit())
_eventQueue.extend(events)
def get():
"""Flushes the event queue and returns the list of events.
This function returns L{Event} objects that can be identified by their
type attribute or their class.
@rtype: iterator
@return: Returns an iterable of objects derived from L{Event} or anything
put in a L{push} call. If the iterator is deleted or otherwise
             interrupted before finishing, the excess items are preserved for the
next call.
"""
_processEvents()
return _event_generator()
def _event_generator():
while _eventQueue:
# if there is an interruption the rest of the events stay untouched
        # this means you can break out of an event.get loop without losing
# the leftover events
yield(_eventQueue.pop(0))
    return
def wait(timeout=None, flush=True):
"""Wait for an event.
@type timeout: int or None
@param timeout: The time in seconds that this function will wait before
giving up and returning None.
With the default value of None, this will block forever.
@type flush: boolean
@param flush: If True a call to L{tdl.flush} will be made before listening
for events.
@rtype: L{Event} or None
@return: Returns an instance derived from L{Event}, or None if the function
has timed out.
Anything added via L{push} will also be returned.
@since: 1.4.0
"""
if timeout is not None:
timeout = timeout + _time.clock() # timeout at this time
while True:
if _eventQueue:
return _eventQueue.pop(0)
if flush:
            # a full 'round' of events needs to be processed before flushing
_tdl.flush()
if timeout and _time.clock() >= timeout:
return None # return None on timeout
_time.sleep(0.001) # sleep 1ms
_processEvents()
def push(event):
"""Push an event into the event buffer.
@type event: L{Event}-like object
@param event: The event will be available on the next call to L{event.get}.
An event pushed in the middle of a L{get} will not show until
                  the next time L{get} is called, preventing push-related
infinite loops.
This object should at least have a 'type' attribute.
"""
_pushedEvents.append(event)
def key_wait():
"""Waits until the user presses a key.
Then returns a L{KeyDown} event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
@rtype: L{KeyDown}
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
                return KeyDown('F4', '', '', False, True)
_time.sleep(.001)
def set_key_repeat(delay=500, interval=0):
"""Does nothing.
"""
pass
def is_window_closed():
"""Returns True if the exit button on the window has been clicked and
stays True afterwards.
@rtype: boolean
"""
return _lib.TCOD_console_is_window_closed()
__all__ = [_var for _var in locals().keys() if _var[0] != '_']
App.runOnce = _style.backport(App.run_once)
keyWait = _style.backport(key_wait)
setKeyRepeat = _style.backport(set_key_repeat)
isWindowClosed = _style.backport(is_window_closed)
| def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)""" | identifier_body |
event.py | """
This module handles user input.
To handle user input you will likely want to use the L{event.get} function
or create a subclass of L{event.App}.
- L{event.get} iterates over recent events.
- L{event.App} passes events to the overridable methods: ev_* and key_*.
But there are other options such as L{event.keyWait} and L{event.isWindowClosed}.
A few event attributes are actually string constants.
Here's a reference for those:
- L{Event.type}
'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
- L{MouseButtonEvent.button} (found in L{MouseDown} and L{MouseUp} events)
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
- L{KeyEvent.key} (found in L{KeyDown} and L{KeyUp} events)
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
"""
import time as _time
from tcod import ffi as _ffi
from tcod import lib as _lib
import tdl as _tdl
from . import style as _style
_eventQueue = []
_pushedEvents = []
_mousel = 0
_mousem = 0
_mouser = 0
# this interprets the constants from libtcod and makes a key -> keyname dictionary
def _parseKeyNames(lib):
"""
returns a dictionary mapping of human readable key names to their keycodes
this parses constants with the names of K_* and makes code=name pairs
this is for KeyEvent.key variable and that enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs
return _keyNames
_keyNames = _parseKeyNames(_lib)
class Event(object):
"""Base Event class.
You can easily subclass this to make your own events. Be sure to set
the class attribute L{Event.type} for it to be passed to a custom L{App}
ev_* method."""
type = None
"""String constant representing the type of event.
The L{App} ev_* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
parameters.append('%s=%r' % (attr, value))
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)"""
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But a single App instance can not be run multiple times simultaneously.
"""
if getattr(self, '_App__running', False):
raise _tdl.TDLError('An App can not be run multiple times simultaneously')
self.__running = True
while self.__running:
self.runOnce()
def run_once(self):
"""Pump events to this App instance and then return.
This works in the way described in L{App.run} except it immediately
returns after the first L{update} call.
Having multiple L{App} instances and selectively calling runOnce on
them is a decent way to create a state machine.
"""
if not hasattr(self, '_App__prevTime'):
self.__prevTime = _time.clock() # initiate __prevTime
for event in get():
if event.type: # exclude custom events with a blank type variable
# call the ev_* methods
method = 'ev_%s' % event.type # ev_TYPE
getattr(self, method)(event)
if event.type == 'KEYDOWN':
# call the key_* methods
method = 'key_%s' % event.key # key_KEYNAME
if hasattr(self, method): # silently exclude undefined methods
getattr(self, method)(event)
newTime = _time.clock()
self.update(newTime - self.__prevTime)
self.__prevTime = newTime
#_tdl.flush()
def _processEvents():
"""Flushes the event queue from libtcod into the global list _eventQueue"""
global _mousel, _mousem, _mouser, _eventsflushed, _pushedEvents
_eventsflushed = True
events = _pushedEvents # get events from event.push
_pushedEvents = [] # then clear the pushed events queue
mouse = _ffi.new('TCOD_mouse_t *')
libkey = _ffi.new('TCOD_key_t *')
while 1:
libevent = _lib.TCOD_sys_check_for_event(_lib.TCOD_EVENT_ANY, libkey, mouse)
if not libevent: # no more events from libtcod
break
#if mouse.dx or mouse.dy:
if libevent & _lib.TCOD_EVENT_MOUSE_MOVE:
events.append(MouseMotion((mouse.x, mouse.y),
(mouse.cx, mouse.cy),
(mouse.dx, mouse.dy),
(mouse.dcx, mouse.dcy)))
mousepos = ((mouse.x, mouse.y), (mouse.cx, mouse.cy))
for oldstate, newstate, released, button in \
zip((_mousel, _mousem, _mouser),
(mouse.lbutton, mouse.mbutton, mouse.rbutton),
(mouse.lbutton_pressed, mouse.mbutton_pressed,
mouse.rbutton_pressed),
(1, 2, 3)):
if released:
if not oldstate:
events.append(MouseDown(button, *mousepos))
events.append(MouseUp(button, *mousepos))
if newstate:
events.append(MouseDown(button, *mousepos))
elif newstate and not oldstate:
events.append(MouseDown(button, *mousepos))
if mouse.wheel_up:
events.append(MouseDown(4, *mousepos))
if mouse.wheel_down:
events.append(MouseDown(5, *mousepos))
_mousel = mouse.lbutton
_mousem = mouse.mbutton
_mouser = mouse.rbutton
if libkey.vk == _lib.TCODK_NONE:
break
if libkey.pressed:
keyevent = KeyDown
else:
keyevent = KeyUp
if libkey.vk == _lib.TCODK_TEXT:
# Hack 2017-03-22 HexDecimal
# Fix undefined libtcod behaviour which breaks 32-bit builds.
libkey.c = b'\x00'
libkey.shift = False
libkey.lalt = libkey.ralt = False
libkey.lctrl = libkey.rctrl = False
libkey.lmeta = libkey.rmeta = False
events.append(
keyevent(
libkey.vk,
libkey.c.decode('ascii', errors='ignore'),
_ffi.string(libkey.text).decode('utf-8'),
libkey.shift,
libkey.lalt,
libkey.ralt,
libkey.lctrl,
libkey.rctrl,
libkey.lmeta,
libkey.rmeta,
)
)
if _lib.TCOD_console_is_window_closed():
events.append(Quit())
_eventQueue.extend(events)
def get():
"""Flushes the event queue and returns the list of events.
This function returns L{Event} objects that can be identified by their
type attribute or their class.
@rtype: iterator
@return: Returns an iterable of objects derived from L{Event} or anything
put in a L{push} call. If the iterator is deleted or otherwise
             interrupted before finishing, the excess items are preserved for the
next call.
"""
_processEvents()
return _event_generator()
def _event_generator():
while _eventQueue:
# if there is an interruption the rest of the events stay untouched
        # this means you can break out of an event.get loop without losing
# the leftover events
yield(_eventQueue.pop(0))
    return
def | (timeout=None, flush=True):
"""Wait for an event.
@type timeout: int or None
@param timeout: The time in seconds that this function will wait before
giving up and returning None.
With the default value of None, this will block forever.
@type flush: boolean
@param flush: If True a call to L{tdl.flush} will be made before listening
for events.
@rtype: L{Event} or None
@return: Returns an instance derived from L{Event}, or None if the function
has timed out.
Anything added via L{push} will also be returned.
@since: 1.4.0
"""
if timeout is not None:
timeout = timeout + _time.clock() # timeout at this time
while True:
if _eventQueue:
return _eventQueue.pop(0)
if flush:
            # a full 'round' of events needs to be processed before flushing
_tdl.flush()
if timeout and _time.clock() >= timeout:
return None # return None on timeout
_time.sleep(0.001) # sleep 1ms
_processEvents()
def push(event):
"""Push an event into the event buffer.
@type event: L{Event}-like object
@param event: The event will be available on the next call to L{event.get}.
An event pushed in the middle of a L{get} will not show until
                  the next time L{get} is called, preventing push-related
infinite loops.
This object should at least have a 'type' attribute.
"""
_pushedEvents.append(event)
def key_wait():
"""Waits until the user presses a key.
Then returns a L{KeyDown} event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
@rtype: L{KeyDown}
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
                return KeyDown('F4', '', '', False, True)
_time.sleep(.001)
def set_key_repeat(delay=500, interval=0):
"""Does nothing.
"""
pass
def is_window_closed():
"""Returns True if the exit button on the window has been clicked and
stays True afterwards.
@rtype: boolean
"""
return _lib.TCOD_console_is_window_closed()
__all__ = [_var for _var in locals().keys() if _var[0] != '_']
App.runOnce = _style.backport(App.run_once)
keyWait = _style.backport(key_wait)
setKeyRepeat = _style.backport(set_key_repeat)
isWindowClosed = _style.backport(is_window_closed)
| wait | identifier_name |
dealerDispatcherList.js | <%@ page contentType="text/html;charset=UTF-8" %>
<script>
$(document).ready(function() {
$('#dealerTable').bootstrapTable({
            //HTTP method for the request
method: 'get',
            //data type: json
dataType: "json",
            //show the refresh button
showRefresh: true,
            //show the card/table view toggle button
showToggle: true,
            //show the column chooser dropdown
showColumns: true,
            //show the export button
showExport: true,
            //show the pagination on/off toggle button
showPaginationSwitch: true,
            //require at least 2 visible columns
minimumCountColumns: 2,
            //whether to stripe alternating rows
striped: true,
            //whether to use the cache; defaults to true, so it usually needs to be set explicitly (*)
cache: false,
            //whether to show pagination (*)
pagination: true,
            //sort order
sortOrder: "asc",
            //page loaded on init; defaults to the first page
pageNumber:1,
            //number of rows per page (*)
pageSize: 10,
            //selectable page sizes (*)
pageList: [10, 25, 50, 100],
            //this endpoint must handle the fixed parameters bootstrap-table sends and return JSON in the expected format
url: "${ctx}/process/shopmsg/shopMsg/dataDispatcher",
            //defaults to 'limit'; the parameters sent to the server are: limit, offset, search, sort, order
//queryParamsType:'',
            //query parameters sent with every request; can be customized
queryParams : function(params) {
var searchParam = $("#searchForm").serializeJSON();
searchParam.pageNo = params.limit === undefined? "1" :params.offset/params.limit+1;
searchParam.pageSize = params.limit === undefined? -1 : params.limit;
searchParam.orderBy = params.sort === undefined? "" : params.sort+ " "+ params.order;
return searchParam;
},
            //pagination mode: client (client-side) or server (server-side) (*)
sidePagination: "server",
contextMenuTrigger:"right",//pc端 按右键弹出菜单
contextMenuTriggerMobile:"press",//手机端 弹出菜单,click:单击, press:长按。
contextMenu: '#context-menu',
onContextMenuItem: function(row, $el){
if($el.data("item") == "edit"){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + row.id;
} else if($el.data("item") == "delete"){
del(row.id);
}
},
onClickRow: function(row, $el){
},
columns: [{
checkbox: true
}
,{
field: 'companyCode',
title: '经销商编码',
sortable: true
}
,{
field: 'companyName',
title: '经销商名称',
sortable: true
}
,{
field: 'contacts',
title: '联系人',
sortable: true
}
,{
field: 'mobile',
title: '手机',
sortable: true
}
,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}
,{
field: 'underProduct',
title: '承接品类',
sortable: true
},{
field: 'gmName',
title: '工贸名称',
sortable: true
},
/*
,{
field: 'channelName',
title: '渠道名称',
sortable: true
}
,{
field: 'taxCode',
title: '税码',
sortable: true
}
,{
field: 'kjtAccount',
title: '快捷通账号',
sortable: true
}
,{
field: 'legalPersonName',
title: '法人姓名',
sortable: true
}
,{
field: 'legalPersonIdCard',
title: '法人身份号',
sortable: true
}
,{
field: 'companyTel',
title: '公司电话',
sortable: true
}
*/
/* ,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}*/
]
});
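        // Note (assumption, not from the original code): with sidePagination set
        // to "server", bootstrap-table expects the endpoint above to answer with
        // JSON shaped roughly like {"total": <row count>, "rows": [...]}, and the
        // queryParams function maps the widget's limit/offset/sort values onto the
        // backend's pageNo/pageSize/orderBy parameters.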
        if(navigator.userAgent.match(/(iPhone|iPod|Android|ios)/i)){//if on a mobile device
$('#dealerTable').bootstrapTable("toggleView");
}
$('#dealerTable').on('check.bs.table uncheck.bs.table load-success.bs.table ' +
'check-all.bs.table uncheck-all.bs.table', function () {
var sels = $('#dealerTable').bootstrapTable('getSelections');
$('#remove').prop('disabled', ! sels.length);
$('#edit').prop('disabled', sels.length!=1);
if(sels.length == 1 && sels[0].auditState =='0'){
$('#audit').prop('disabled', false);
} else {
$('#audit').prop('disabled', true);
}
});
$("#btnImport").click(function(){
jh.open({
type: 1,
area: [500, 300],
title:"导入数据",
content:$("#importBox").html() ,
btn: ['下载模板','确定', '关闭'],
btn1: function(index, layero){
window.location='${ctx}/shop/dealer/dealer/import/template';
},
btn2: function(index, layero){
var inputForm =top.$("#importForm");
                    var top_iframe = top.getActiveTab().attr("name");//get the iframe name of the currently active tab
inputForm.attr("target",top_iframe);//表单提交成功后,从服务器返回的url在当前tab中展示
inputForm.onsubmit = function(){
jh.loading(' 正在导入,请稍等...');
}
inputForm.submit();
jh.close(index);
},
btn3: function(index){
jh.close(index);
}
});
});
$("#search").click("click", function() {// 绑定查询按扭
$('#dealerTable').bootstrapTable('refresh');
});
$("#reset").click("click", function() {// 绑定查询按扭
$("#searchForm input").val("");
$("#searchForm select").val("");
$("#searchForm .select-item").html("");
$('#dealerTable').bootstrapTable('refresh');
});
});
function getIdSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.id
});
}
function getNameSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.companyName
});
}
function del(id){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/delete?id="+id, function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
});
}
function deleteAll(){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/deleteAll?ids=" + getIdSelections(), function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
})
}
function edit(){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + getIdSelections();
}
function audit(id){
if(id == undefined){
id = getIdSelections();
}
jh.open({
type: 1,
area: ['400px','200px'],
title:"审核",
content:$("#auditBox").html() ,
scrollbar: false,
btn: ['确定', '关闭'],
btn1: function(index, layero){
var inputForm = layero.find("#auditForm");
var sel = inputForm.find("input[name='auditState']:checked").val();
if(sel==undefined){
jh.alert('请选择是否同意');
return false;
}
if(sel=='2'){
var auditDesc = inputForm.find('#auditDesc');
if($.trim(auditDesc.val())==''){
jh.alert('请输入不同意原因');
return false;
}
}
jh.loading(' 正在审核,请稍等...');
jh.post("${ctx}/shop/dealer/dealer/audit",inputForm.serialize(),function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
});
jh.close(index);
},
btn2: function(index){
jh.close(index);
},
success: function(layero, index){
                //initialize after the dialog opens
var contElem = layero.find('.layui-layer-content');
var inputForm = contElem.find("#auditForm");
var idElem = inputForm.find('#auditId');
idElem.val(id);
var auditDescDiv = inputForm.find('#auditDescDiv');
var auditDesc = inputForm.find('#auditDesc');
var conHeight = contElem.height();
var layerHeight = layero.height();
inputForm.find("input[name='auditState']").change(function(){
var sel = $(this).val();
if(sel == "1"){
auditDescDiv.addClass('hide');
auditDesc.val('');
layero.height(layerHeight);
contElem.height(conHeight);
} else if(sel == "2"){
auditDescDiv.removeClass('hide');
layero.height(layerHeight+120);
contElem.height(conHeight+120);
auditDesc.focus();
}
})
}
});
}
var callbackdata = function () {
var arrIds = getIdSelections();
var arrNames = getNameSelections();
return {
arrIds:arrIds,
arrNames:arrNames
};
}
</script> | conditional_block |
||
dealerDispatcherList.js | <%@ page contentType="text/html;charset=UTF-8" %>
<script>
$(document).ready(function() {
$('#dealerTable').bootstrapTable({
            //HTTP method for the request
method: 'get',
            //data type: json
dataType: "json",
            //show the refresh button
showRefresh: true,
            //show the card/table view toggle button
showToggle: true,
            //show the column chooser dropdown
showColumns: true,
            //show the export button
showExport: true,
            //show the pagination on/off toggle button
showPaginationSwitch: true,
            //require at least 2 visible columns
minimumCountColumns: 2,
            //whether to stripe alternating rows
striped: true,
            //whether to use the cache; defaults to true, so it usually needs to be set explicitly (*)
cache: false,
            //whether to show pagination (*)
pagination: true,
            //sort order
sortOrder: "asc",
            //page loaded on init; defaults to the first page
pageNumber:1,
            //number of rows per page (*)
pageSize: 10,
            //selectable page sizes (*)
pageList: [10, 25, 50, 100],
            //this endpoint must handle the fixed parameters bootstrap-table sends and return JSON in the expected format
url: "${ctx}/process/shopmsg/shopMsg/dataDispatcher",
            //defaults to 'limit'; the parameters sent to the server are: limit, offset, search, sort, order
//queryParamsType:'',
            //query parameters sent with every request; can be customized
queryParams : function(params) {
var searchParam = $("#searchForm").serializeJSON();
searchParam.pageNo = params.limit === undefined? "1" :params.offset/params.limit+1;
searchParam.pageSize = params.limit === undefined? -1 : params.limit;
searchParam.orderBy = params.sort === undefined? "" : params.sort+ " "+ params.order;
return searchParam;
},
            //pagination mode: client (client-side) or server (server-side) (*)
sidePagination: "server",
contextMenuTrigger:"right",//pc端 按右键弹出菜单
contextMenuTriggerMobile:"press",//手机端 弹出菜单,click:单击, press:长按。
contextMenu: '#context-menu',
onContextMenuItem: function(row, $el){
if($el.data("item") == "edit"){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + row.id;
} else if($el.data("item") == "delete"){
del(row.id);
}
},
onClickRow: function(row, $el){
},
columns: [{
checkbox: true
}
,{
field: 'companyCode',
title: '经销商编码',
sortable: true
}
,{
field: 'companyName',
title: '经销商名称',
sortable: true
}
,{
field: 'contacts',
title: '联系人',
sortable: true
}
,{
field: 'mobile',
title: '手机',
sortable: true
}
,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}
,{
field: 'underProduct',
title: '承接品类',
sortable: true
},{
field: 'gmName',
title: '工贸名称',
sortable: true
},
/*
,{
field: 'channelName',
title: '渠道名称',
sortable: true
}
,{
field: 'taxCode',
title: '税码',
sortable: true
}
,{
field: 'kjtAccount',
title: '快捷通账号', |
}
,{
field: 'legalPersonName',
title: '法人姓名',
sortable: true
}
,{
field: 'legalPersonIdCard',
title: '法人身份号',
sortable: true
}
,{
field: 'companyTel',
title: '公司电话',
sortable: true
}
*/
/* ,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}*/
]
});
        if(navigator.userAgent.match(/(iPhone|iPod|Android|ios)/i)){//if on a mobile device
$('#dealerTable').bootstrapTable("toggleView");
}
$('#dealerTable').on('check.bs.table uncheck.bs.table load-success.bs.table ' +
'check-all.bs.table uncheck-all.bs.table', function () {
var sels = $('#dealerTable').bootstrapTable('getSelections');
$('#remove').prop('disabled', ! sels.length);
$('#edit').prop('disabled', sels.length!=1);
if(sels.length == 1 && sels[0].auditState =='0'){
$('#audit').prop('disabled', false);
} else {
$('#audit').prop('disabled', true);
}
});
$("#btnImport").click(function(){
jh.open({
type: 1,
area: [500, 300],
title:"导入数据",
content:$("#importBox").html() ,
btn: ['下载模板','确定', '关闭'],
btn1: function(index, layero){
window.location='${ctx}/shop/dealer/dealer/import/template';
},
btn2: function(index, layero){
var inputForm =top.$("#importForm");
                    var top_iframe = top.getActiveTab().attr("name");//get the iframe name of the currently active tab
inputForm.attr("target",top_iframe);//表单提交成功后,从服务器返回的url在当前tab中展示
inputForm.onsubmit = function(){
jh.loading(' 正在导入,请稍等...');
}
inputForm.submit();
jh.close(index);
},
btn3: function(index){
jh.close(index);
}
});
});
$("#search").click("click", function() {// 绑定查询按扭
$('#dealerTable').bootstrapTable('refresh');
});
$("#reset").click("click", function() {// 绑定查询按扭
$("#searchForm input").val("");
$("#searchForm select").val("");
$("#searchForm .select-item").html("");
$('#dealerTable').bootstrapTable('refresh');
});
});
function getIdSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.id
});
}
function getNameSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.companyName
});
}
function del(id){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/delete?id="+id, function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
});
}
function deleteAll(){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/deleteAll?ids=" + getIdSelections(), function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
})
}
function edit(){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + getIdSelections();
}
function audit(id){
if(id == undefined){
id = getIdSelections();
}
jh.open({
type: 1,
area: ['400px','200px'],
title:"审核",
content:$("#auditBox").html() ,
scrollbar: false,
btn: ['确定', '关闭'],
btn1: function(index, layero){
var inputForm = layero.find("#auditForm");
var sel = inputForm.find("input[name='auditState']:checked").val();
if(sel==undefined){
jh.alert('请选择是否同意');
return false;
}
if(sel=='2'){
var auditDesc = inputForm.find('#auditDesc');
if($.trim(auditDesc.val())==''){
jh.alert('请输入不同意原因');
return false;
}
}
jh.loading(' 正在审核,请稍等...');
jh.post("${ctx}/shop/dealer/dealer/audit",inputForm.serialize(),function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
});
jh.close(index);
},
btn2: function(index){
jh.close(index);
},
success: function(layero, index){
                //initialize after the dialog opens
var contElem = layero.find('.layui-layer-content');
var inputForm = contElem.find("#auditForm");
var idElem = inputForm.find('#auditId');
idElem.val(id);
var auditDescDiv = inputForm.find('#auditDescDiv');
var auditDesc = inputForm.find('#auditDesc');
var conHeight = contElem.height();
var layerHeight = layero.height();
inputForm.find("input[name='auditState']").change(function(){
var sel = $(this).val();
if(sel == "1"){
auditDescDiv.addClass('hide');
auditDesc.val('');
layero.height(layerHeight);
contElem.height(conHeight);
} else if(sel == "2"){
auditDescDiv.removeClass('hide');
layero.height(layerHeight+120);
contElem.height(conHeight+120);
auditDesc.focus();
}
})
}
});
}
var callbackdata = function () {
var arrIds = getIdSelections();
var arrNames = getNameSelections();
return {
arrIds:arrIds,
arrNames:arrNames
};
}
</script> | sortable: true | random_line_split |
entities.py | """
Created on Dec 7, 2014
@author: bbuxton
"""
import random
import sys
import pygame
import math
from collections.abc import Callable, Iterator
from typing import Generator, Iterable, Optional, Generic, Tuple, TypeVar, Type, cast
import zombiesim.util as zutil
from zombiesim.type_def import Bounds, Direction, Food, Human, HasPosition,\
PointProducer, Point, World, Zombie
SpritePredicate = Callable[[pygame.sprite.Sprite], bool]
EntityCallback = Callable[['Entity'], None]
T = TypeVar('T', bound='Entity')
C = TypeVar('C', bound=HasPosition)
class EntityGroup(pygame.sprite.Group, Generic[T]):
def __init__(self, clazz: type[T]):
super().__init__()
self.entity_class: type[T] = clazz
def create_one(self) -> T:
entity: T = self.entity_class()
self.add(entity)
return entity
def __iter__(self) -> Iterator[T]:
return cast(Iterator[T], super().__iter__())
ENTITY_WIDTH = 10
ENTITY_HEIGHT = 10
class Entity(pygame.sprite.Sprite):
@classmethod
def create_group(cls: Type[T],
size: int,
point_getter: PointProducer) -> EntityGroup[T]:
all_group = EntityGroup[T](cls)
for _ in range(size):
new_entity = all_group.create_one()
pos = point_getter()
new_entity.rect.center = int(pos.x), int(pos.y)
return all_group
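    # Illustrative usage sketch (assumed field size, not part of the module):
    # create_group builds `size` entities and asks point_getter for each spawn
    # position.
    #
    #     def random_point() -> Point:
    #         return Point(random.uniform(0, 800), random.uniform(0, 600))
    #
    #     zombies = ZombieSprite.create_group(5, random_point)
    #     humans = HumanSprite.create_group(20, random_point)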
def __init__(self, color: pygame.Color = pygame.Color('black')):
super().__init__()
self.color: pygame.Color = color
self._mouse_groups: list[pygame.sprite.AbstractGroup] = []
self.image: pygame.Surface = self.create_image()
self.rect: pygame.rect.Rect = self.image.get_rect()
self.radius: float = min(self.rect.width, self.rect.height) / 2
self.draw_image(self.color) # FIXME
@property
def center(self) -> Tuple[int,int]:
return self.rect.center
@property
def position(self) -> Point:
return Point(*self.center)
def create_image(self) -> pygame.Surface:
image = pygame.Surface(
[ENTITY_WIDTH, ENTITY_HEIGHT], flags=pygame.SRCALPHA)
image.fill(pygame.Color(0, 0, 0, 0))
return image
def draw_image(self, color: pygame.Color) -> None:
pass
def reset_pos(self) -> None:
pass
def pick_up(self, pos: Point) -> None:
groups = self.groups()
self._mouse_groups = []
for group in groups:
group.remove(self)
self._mouse_groups.append(group)
self._mouse_offset = self.position - pos
def update_pick_up(self, pos: Point) -> None:
new_point = pos + self._mouse_offset
self.rect.center = int(new_point.x), int(new_point.y)
self.reset_pos()
def put_down(self, pos: Point):
self.update_pick_up(pos)
for group in self._mouse_groups:
group.add(self)
del self._mouse_groups
del self._mouse_offset
def update(self, *args, **kwargs) -> None:
""" Let's be honest - this is to make the typing system happy"""
self.update_state(args[0])
super().update(*args, **kwargs)
def closest_to(self,
other: Iterable[C],
bounds: Bounds,
to_include: Callable[[C], bool] = lambda _: True) \
-> tuple[Optional[C], float]:
self_rect: Optional[pygame.rect.Rect] = self.rect
if self_rect is None:
return (None, 0.0)
span = zutil.span(bounds)
span_mid = span / 2.0
curmin: float = sys.maxsize
curactor: Optional[C] = None
pos = self.position
for each in other:
if not to_include(each):
continue
dist = pos.distance(each.position)
if dist > span_mid:
dist = span - dist
if dist < curmin:
curmin = dist
curactor = each
return (curactor, curmin)
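    # Worked example of the wrap-around distance above (assuming zutil.span
    # returns the field's full extent): with span = 1000, span_mid = 500, two
    # actors measured 900 apart are really 1000 - 900 = 100 apart across the
    # wrapped edge, so 100 is the value compared against curmin.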
def update_state(self, field: World) -> None:
pass
class Actor(Entity):
def __init__(self, color: pygame.Color, default_energy: float = 0.0):
super().__init__(color)
self.energy: float = default_energy
self.change_dir()
@property
def x(self) -> int:
return self.rect.x
@property
def y(self) -> int:
return self.rect.y
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.ellipse(self.image, color, self.image.get_rect())
def update_pos(self, direc: Direction) -> None:
new_x = self.x + (direc.x * self.energy)
new_y = self.y + (direc.y * self.energy)
self.rect.x = int(round(new_x))
self.rect.y = int(round(new_y))
def hit_edge(self, parent_rect: pygame.rect.Rect) -> None:
if self.rect.left < parent_rect.left:
self.rect.right = parent_rect.right
if self.rect.right > parent_rect.right:
self.rect.left = parent_rect.left
if self.rect.top < parent_rect.top:
self.rect.bottom = parent_rect.bottom
if self.rect.bottom > parent_rect.bottom:
self.rect.top = parent_rect.top
def change_dir(self) -> None:
self.current_dir = zutil.random_direction()
def update_state(self, field: World) -> None:
self.update_pos(self.current_dir)
super().update_state(field)
ZOMBIE_VISION: int = 100
ZOMBIE_ATTACK_WAIT_MAX: int = 25
ZOMBIE_COLOR: pygame.Color = pygame.Color('red')
ZOMBIE_ENERGY: float = 2.0
RECALCULATE_HUMANS_SEEN: int = 10
class ZombieSprite(Actor):
def __init__(self):
self.angle = zutil.random_angle()
super().__init__(ZOMBIE_COLOR, ZOMBIE_ENERGY)
self.attack_wait = random.randint(
int(ZOMBIE_ATTACK_WAIT_MAX / 2), ZOMBIE_ATTACK_WAIT_MAX)
def update_state(self, field: World) -> None:
if self.attack_wait > 0:
self.attack_wait -= 1
return
goto = Point(*self.rect.center)
goto = self.run_to_humans(field, goto)
next_point = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
# TODO Revisit
victim_angle = Direction.from_points(Point(*self.rect.center), next_point).to_angle()
if victim_angle > self.angle:
self.angle += math.radians(10)
elif victim_angle < self.angle:
self.angle -= math.radians(10)
self.current_dir = Direction.from_angle(self.angle)
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_HUMANS_SEEN)
def humans_in_vision(self, field: World) -> Iterable[Human]:
return [human for human in field.humans
if self.position.distance(human.position) < ZOMBIE_VISION]
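    # Note (assumption about zutil.cache_for): the decorator above appears to
    # memoize the scan for RECALCULATE_HUMANS_SEEN consecutive calls, so the
    # zombie rebuilds its list of visible humans every 10 updates rather than
    # on every frame.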
def run_to_humans(self, field: World, goto: Point) -> Point:
humans = self.humans_in_vision(field)
bounds = field.bounds
victim, _ = self.closest_to(humans, bounds)
if victim is None:
return goto
span = zutil.span(bounds)
span_mid = span / 2.0
direc = Direction.from_points(self.position, victim.position)
dist = self.position.distance(victim.position)
if dist > span_mid:
dist = span - dist
direc = -direc
factor_dist = float(ZOMBIE_VISION - dist)
goto = Point(int(goto.x + (factor_dist * direc.x)),
int(goto.y + (factor_dist * direc.y)))
return goto
def change_dir(self) -> None:
self.angle = zutil.random_angle_change(self.angle, 10)
self.current_dir = Direction.from_angle(self.angle)
HUMAN_VISION: int = 50
HUMAN_COLOR: pygame.Color = pygame.Color('pink')
HUMAN_ENERGY_LEVEL: float = 4.0
HUMAN_HUNGRY_LEVEL: float = HUMAN_ENERGY_LEVEL / 2
RECALCULATE_ZOMBIES_SEEN: int = 5
class HumanSprite(Actor):
def | (self):
super().__init__(HUMAN_COLOR)
self.lifetime: Generator[float, None, None] = self.new_lifetime()
def eat_food(self, food: Food) -> None:
if self.is_hungry():
food.consume()
self.lifetime = self.new_lifetime()
self.change_dir()
def is_hungry(self) -> bool:
return self.energy < HUMAN_HUNGRY_LEVEL
def is_dead(self) -> bool:
return self.energy == 0
def new_lifetime(self) -> Generator[float, None, None]:
return zutil.xfrange(2 + (random.random() * 2), 0, -0.0005)
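    # Sketch of the intent (assuming zutil.xfrange behaves like a float range):
    # the generator counts down from roughly 2.0-4.0 to 0 in 0.0005 steps, so a
    # human survives several thousand updates unless eat_food() resets it.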
def alpha(self) -> float:
result = self.energy / 2.0
return min(result, 1)
def update_state(self, field: World) -> None:
self.energy = next(self.lifetime, 0)
if self.is_dead():
self.kill()
return
self.color.a = int(255 * self.alpha())
self.draw_image(self.color)
goto = Point(*self.rect.center)
goto = self.run_from_zombies(field, goto)
goto = self.run_to_food(field, goto)
next_pos = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
go_to_dir = Direction.from_points(Point(*self.rect.center), next_pos)
self.current_dir = go_to_dir
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_ZOMBIES_SEEN)
def zombies_in_vision(self, field: World) -> Iterable[Zombie]:
return [zombie for zombie in field.zombies
if self.position.distance(zombie.position) <= HUMAN_VISION]
def run_from_zombies(self, field: World, goto: Point) -> Point:
span = zutil.span(field.bounds)
span_mid = span / 2.0
for zombie in self.zombies_in_vision(field):
dist = self.position.distance(zombie.position)
rev_dir = False
if dist > span_mid:
dist = span - dist
rev_dir = True
factor_dist = float(HUMAN_VISION - dist) ** 2
direc = Direction.from_points(self.position, zombie.position)
if not rev_dir:
direc = -direc
goto = Point(goto.x + (factor_dist * direc.x),
goto.y + (factor_dist * direc.y))
return goto
def run_to_food(self, field: World, goto: Point) -> Point:
if self.is_hungry():
span = zutil.span(field.bounds)
span_mid = span / 2.0
food, _ = self.closest_to(field.food, field.bounds)
if food is not None:
direc = Direction.from_points(self.position, food.position)
dist = self.position.distance(food.position)
if dist > span_mid:
direc = -direc
factor = (float(self.energy) / 4 * HUMAN_VISION) ** 2
goto = Point(goto.x + (factor * direc.x), goto.y + (factor * direc.y))
return goto
class Consumable(Entity):
def __init__(self, color: pygame.Color, amount: int = 5):
super().__init__(color)
self.amount: int = amount
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.rect(self.image, color, self.image.get_rect())
def consume(self) -> None:
self.amount -= 1
if not self.has_more():
self.kill()
def has_more(self) -> bool:
return self.amount > 0
FOOD_COLOR: pygame.Color = pygame.Color('green')
DEFAULT_FOOD_AMOUNT: int = 25
class FoodSprite(Consumable):
def __init__(self):
super().__init__(FOOD_COLOR, amount=DEFAULT_FOOD_AMOUNT)
| __init__ | identifier_name |
entities.py | """
Created on Dec 7, 2014
@author: bbuxton
"""
import random
import sys
import pygame
import math
from collections.abc import Callable, Iterator
from typing import Generator, Iterable, Optional, Generic, Tuple, TypeVar, Type, cast
import zombiesim.util as zutil
from zombiesim.type_def import Bounds, Direction, Food, Human, HasPosition,\
PointProducer, Point, World, Zombie
SpritePredicate = Callable[[pygame.sprite.Sprite], bool]
EntityCallback = Callable[['Entity'], None]
T = TypeVar('T', bound='Entity')
C = TypeVar('C', bound=HasPosition)
class EntityGroup(pygame.sprite.Group, Generic[T]):
def __init__(self, clazz: type[T]):
super().__init__()
self.entity_class: type[T] = clazz
def create_one(self) -> T:
entity: T = self.entity_class()
self.add(entity)
return entity
def __iter__(self) -> Iterator[T]:
return cast(Iterator[T], super().__iter__())
ENTITY_WIDTH = 10
ENTITY_HEIGHT = 10
class Entity(pygame.sprite.Sprite):
@classmethod
def create_group(cls: Type[T],
size: int,
point_getter: PointProducer) -> EntityGroup[T]:
all_group = EntityGroup[T](cls)
for _ in range(size):
new_entity = all_group.create_one()
pos = point_getter()
new_entity.rect.center = int(pos.x), int(pos.y)
return all_group
def __init__(self, color: pygame.Color = pygame.Color('black')):
super().__init__()
self.color: pygame.Color = color
self._mouse_groups: list[pygame.sprite.AbstractGroup] = []
self.image: pygame.Surface = self.create_image()
self.rect: pygame.rect.Rect = self.image.get_rect()
self.radius: float = min(self.rect.width, self.rect.height) / 2
self.draw_image(self.color) # FIXME
@property
def center(self) -> Tuple[int,int]:
return self.rect.center
@property
def position(self) -> Point:
return Point(*self.center)
def create_image(self) -> pygame.Surface:
image = pygame.Surface(
[ENTITY_WIDTH, ENTITY_HEIGHT], flags=pygame.SRCALPHA)
image.fill(pygame.Color(0, 0, 0, 0))
return image
def draw_image(self, color: pygame.Color) -> None:
pass
def reset_pos(self) -> None:
pass
def pick_up(self, pos: Point) -> None:
groups = self.groups()
self._mouse_groups = []
for group in groups:
group.remove(self)
self._mouse_groups.append(group)
self._mouse_offset = self.position - pos
def update_pick_up(self, pos: Point) -> None:
new_point = pos + self._mouse_offset
self.rect.center = int(new_point.x), int(new_point.y)
self.reset_pos()
def put_down(self, pos: Point):
self.update_pick_up(pos)
for group in self._mouse_groups:
group.add(self)
del self._mouse_groups
del self._mouse_offset
def update(self, *args, **kwargs) -> None:
""" Let's be honest - this is to make the typing system happy"""
self.update_state(args[0])
super().update(*args, **kwargs)
def closest_to(self,
other: Iterable[C],
bounds: Bounds,
to_include: Callable[[C], bool] = lambda _: True) \
-> tuple[Optional[C], float]:
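        # distances are measured on a wrapping field: anything farther away than
        # half the span is actually closer going the other way around the edge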
self_rect: Optional[pygame.rect.Rect] = self.rect
if self_rect is None:
return (None, 0.0)
span = zutil.span(bounds)
span_mid = span / 2.0
curmin: float = sys.maxsize
curactor: Optional[C] = None
pos = self.position
for each in other:
if not to_include(each):
continue
dist = pos.distance(each.position)
if dist > span_mid:
dist = span - dist
if dist < curmin:
curmin = dist
curactor = each
return (curactor, curmin)
def update_state(self, field: World) -> None:
pass
class Actor(Entity):
def __init__(self, color: pygame.Color, default_energy: float = 0.0):
super().__init__(color)
self.energy: float = default_energy
self.change_dir()
@property
def x(self) -> int:
return self.rect.x
@property
def y(self) -> int:
return self.rect.y
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.ellipse(self.image, color, self.image.get_rect())
def update_pos(self, direc: Direction) -> None:
new_x = self.x + (direc.x * self.energy)
new_y = self.y + (direc.y * self.energy)
self.rect.x = int(round(new_x))
self.rect.y = int(round(new_y))
def hit_edge(self, parent_rect: pygame.rect.Rect) -> None:
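        # wrap around to the opposite edge instead of bouncing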
if self.rect.left < parent_rect.left:
self.rect.right = parent_rect.right
if self.rect.right > parent_rect.right:
self.rect.left = parent_rect.left
if self.rect.top < parent_rect.top:
self.rect.bottom = parent_rect.bottom
if self.rect.bottom > parent_rect.bottom:
self.rect.top = parent_rect.top
def change_dir(self) -> None:
self.current_dir = zutil.random_direction()
def update_state(self, field: World) -> None:
self.update_pos(self.current_dir)
super().update_state(field)
ZOMBIE_VISION: int = 100
ZOMBIE_ATTACK_WAIT_MAX: int = 25
ZOMBIE_COLOR: pygame.Color = pygame.Color('red')
ZOMBIE_ENERGY: float = 2.0
RECALCULATE_HUMANS_SEEN: int = 10
class ZombieSprite(Actor):
def __init__(self):
self.angle = zutil.random_angle()
super().__init__(ZOMBIE_COLOR, ZOMBIE_ENERGY)
self.attack_wait = random.randint(
int(ZOMBIE_ATTACK_WAIT_MAX / 2), ZOMBIE_ATTACK_WAIT_MAX)
def update_state(self, field: World) -> None:
if self.attack_wait > 0:
self.attack_wait -= 1
return
goto = Point(*self.rect.center)
goto = self.run_to_humans(field, goto)
next_point = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
# TODO Revisit
victim_angle = Direction.from_points(Point(*self.rect.center), next_point).to_angle()
if victim_angle > self.angle:
self.angle += math.radians(10)
elif victim_angle < self.angle:
self.angle -= math.radians(10)
self.current_dir = Direction.from_angle(self.angle)
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_HUMANS_SEEN)
def humans_in_vision(self, field: World) -> Iterable[Human]:
return [human for human in field.humans
if self.position.distance(human.position) < ZOMBIE_VISION]
def run_to_humans(self, field: World, goto: Point) -> Point:
humans = self.humans_in_vision(field)
bounds = field.bounds
victim, _ = self.closest_to(humans, bounds)
if victim is None:
return goto
span = zutil.span(bounds)
span_mid = span / 2.0
direc = Direction.from_points(self.position, victim.position)
dist = self.position.distance(victim.position)
if dist > span_mid:
dist = span - dist
direc = -direc
factor_dist = float(ZOMBIE_VISION - dist)
goto = Point(int(goto.x + (factor_dist * direc.x)),
int(goto.y + (factor_dist * direc.y)))
return goto
def change_dir(self) -> None:
self.angle = zutil.random_angle_change(self.angle, 10)
self.current_dir = Direction.from_angle(self.angle)
HUMAN_VISION: int = 50
HUMAN_COLOR: pygame.Color = pygame.Color('pink')
HUMAN_ENERGY_LEVEL: float = 4.0
HUMAN_HUNGRY_LEVEL: float = HUMAN_ENERGY_LEVEL / 2
RECALCULATE_ZOMBIES_SEEN: int = 5
class HumanSprite(Actor):
def __init__(self):
super().__init__(HUMAN_COLOR)
self.lifetime: Generator[float, None, None] = self.new_lifetime()
def eat_food(self, food: Food) -> None:
if self.is_hungry():
food.consume()
self.lifetime = self.new_lifetime()
self.change_dir()
def is_hungry(self) -> bool:
return self.energy < HUMAN_HUNGRY_LEVEL
def is_dead(self) -> bool:
return self.energy == 0
def new_lifetime(self) -> Generator[float, None, None]:
return zutil.xfrange(2 + (random.random() * 2), 0, -0.0005)
def alpha(self) -> float:
result = self.energy / 2.0
return min(result, 1)
def update_state(self, field: World) -> None:
self.energy = next(self.lifetime, 0)
if self.is_dead():
self.kill()
return
self.color.a = int(255 * self.alpha())
self.draw_image(self.color)
goto = Point(*self.rect.center)
goto = self.run_from_zombies(field, goto)
goto = self.run_to_food(field, goto)
next_pos = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
go_to_dir = Direction.from_points(Point(*self.rect.center), next_pos)
self.current_dir = go_to_dir
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_ZOMBIES_SEEN)
def zombies_in_vision(self, field: World) -> Iterable[Zombie]:
return [zombie for zombie in field.zombies
if self.position.distance(zombie.position) <= HUMAN_VISION]
def run_from_zombies(self, field: World, goto: Point) -> Point:
span = zutil.span(field.bounds)
span_mid = span / 2.0
for zombie in self.zombies_in_vision(field):
dist = self.position.distance(zombie.position)
rev_dir = False
if dist > span_mid:
dist = span - dist
rev_dir = True
factor_dist = float(HUMAN_VISION - dist) ** 2
direc = Direction.from_points(self.position, zombie.position)
if not rev_dir:
direc = -direc
goto = Point(goto.x + (factor_dist * direc.x),
goto.y + (factor_dist * direc.y))
return goto
def run_to_food(self, field: World, goto: Point) -> Point:
if self.is_hungry():
|
return goto
class Consumable(Entity):
def __init__(self, color: pygame.Color, amount: int = 5):
super().__init__(color)
self.amount: int = amount
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.rect(self.image, color, self.image.get_rect())
def consume(self) -> None:
self.amount -= 1
if not self.has_more():
self.kill()
def has_more(self) -> bool:
return self.amount > 0
FOOD_COLOR: pygame.Color = pygame.Color('green')
DEFAULT_FOOD_AMOUNT: int = 25
class FoodSprite(Consumable):
def __init__(self):
super().__init__(FOOD_COLOR, amount=DEFAULT_FOOD_AMOUNT)
| span = zutil.span(field.bounds)
span_mid = span / 2.0
food, _ = self.closest_to(field.food, field.bounds)
if food is not None:
direc = Direction.from_points(self.position, food.position)
dist = self.position.distance(food.position)
if dist > span_mid:
direc = -direc
factor = (float(self.energy) / 4 * HUMAN_VISION) ** 2
goto = Point(goto.x + (factor * direc.x), goto.y + (factor * direc.y)) | conditional_block |
entities.py | """
Created on Dec 7, 2014
@author: bbuxton
"""
import random
import sys
import pygame
import math
from collections.abc import Callable, Iterator
from typing import Generator, Iterable, Optional, Generic, Tuple, TypeVar, Type, cast
import zombiesim.util as zutil
from zombiesim.type_def import Bounds, Direction, Food, Human, HasPosition,\
PointProducer, Point, World, Zombie
SpritePredicate = Callable[[pygame.sprite.Sprite], bool]
EntityCallback = Callable[['Entity'], None]
T = TypeVar('T', bound='Entity')
C = TypeVar('C', bound=HasPosition)
class EntityGroup(pygame.sprite.Group, Generic[T]):
def __init__(self, clazz: type[T]):
super().__init__()
self.entity_class: type[T] = clazz
def create_one(self) -> T:
entity: T = self.entity_class()
self.add(entity)
return entity
def __iter__(self) -> Iterator[T]:
return cast(Iterator[T], super().__iter__())
ENTITY_WIDTH = 10
ENTITY_HEIGHT = 10
class Entity(pygame.sprite.Sprite):
@classmethod
def create_group(cls: Type[T],
size: int,
point_getter: PointProducer) -> EntityGroup[T]:
all_group = EntityGroup[T](cls)
for _ in range(size):
new_entity = all_group.create_one()
pos = point_getter()
new_entity.rect.center = int(pos.x), int(pos.y)
return all_group
def __init__(self, color: pygame.Color = pygame.Color('black')):
super().__init__()
self.color: pygame.Color = color
self._mouse_groups: list[pygame.sprite.AbstractGroup] = []
self.image: pygame.Surface = self.create_image()
self.rect: pygame.rect.Rect = self.image.get_rect()
self.radius: float = min(self.rect.width, self.rect.height) / 2
self.draw_image(self.color) # FIXME
@property
def center(self) -> Tuple[int,int]:
return self.rect.center
@property
def position(self) -> Point:
return Point(*self.center)
def create_image(self) -> pygame.Surface:
image = pygame.Surface(
[ENTITY_WIDTH, ENTITY_HEIGHT], flags=pygame.SRCALPHA)
image.fill(pygame.Color(0, 0, 0, 0))
return image
def draw_image(self, color: pygame.Color) -> None:
pass
def reset_pos(self) -> None:
pass
def pick_up(self, pos: Point) -> None:
groups = self.groups()
self._mouse_groups = []
for group in groups:
group.remove(self)
self._mouse_groups.append(group)
self._mouse_offset = self.position - pos
def update_pick_up(self, pos: Point) -> None:
|
def put_down(self, pos: Point):
self.update_pick_up(pos)
for group in self._mouse_groups:
group.add(self)
del self._mouse_groups
del self._mouse_offset
def update(self, *args, **kwargs) -> None:
""" Let's be honest - this is to make the typing system happy"""
self.update_state(args[0])
super().update(*args, **kwargs)
def closest_to(self,
other: Iterable[C],
bounds: Bounds,
to_include: Callable[[C], bool] = lambda _: True) \
-> tuple[Optional[C], float]:
self_rect: Optional[pygame.rect.Rect] = self.rect
if self_rect is None:
return (None, 0.0)
span = zutil.span(bounds)
span_mid = span / 2.0
curmin: float = sys.maxsize
curactor: Optional[C] = None
pos = self.position
for each in other:
if not to_include(each):
continue
dist = pos.distance(each.position)
if dist > span_mid:
dist = span - dist
if dist < curmin:
curmin = dist
curactor = each
return (curactor, curmin)
def update_state(self, field: World) -> None:
pass
class Actor(Entity):
def __init__(self, color: pygame.Color, default_energy: float = 0.0):
super().__init__(color)
self.energy: float = default_energy
self.change_dir()
@property
def x(self) -> int:
return self.rect.x
@property
def y(self) -> int:
return self.rect.y
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.ellipse(self.image, color, self.image.get_rect())
def update_pos(self, direc: Direction) -> None:
new_x = self.x + (direc.x * self.energy)
new_y = self.y + (direc.y * self.energy)
self.rect.x = int(round(new_x))
self.rect.y = int(round(new_y))
def hit_edge(self, parent_rect: pygame.rect.Rect) -> None:
if self.rect.left < parent_rect.left:
self.rect.right = parent_rect.right
if self.rect.right > parent_rect.right:
self.rect.left = parent_rect.left
if self.rect.top < parent_rect.top:
self.rect.bottom = parent_rect.bottom
if self.rect.bottom > parent_rect.bottom:
self.rect.top = parent_rect.top
def change_dir(self) -> None:
self.current_dir = zutil.random_direction()
def update_state(self, field: World) -> None:
self.update_pos(self.current_dir)
super().update_state(field)
ZOMBIE_VISION: int = 100
ZOMBIE_ATTACK_WAIT_MAX: int = 25
ZOMBIE_COLOR: pygame.Color = pygame.Color('red')
ZOMBIE_ENERGY: float = 2.0
RECALCULATE_HUMANS_SEEN: int = 10
class ZombieSprite(Actor):
def __init__(self):
self.angle = zutil.random_angle()
super().__init__(ZOMBIE_COLOR, ZOMBIE_ENERGY)
self.attack_wait = random.randint(
int(ZOMBIE_ATTACK_WAIT_MAX / 2), ZOMBIE_ATTACK_WAIT_MAX)
def update_state(self, field: World) -> None:
if self.attack_wait > 0:
self.attack_wait -= 1
return
goto = Point(*self.rect.center)
goto = self.run_to_humans(field, goto)
next_point = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
# TODO Revisit
victim_angle = Direction.from_points(Point(*self.rect.center), next_point).to_angle()
if victim_angle > self.angle:
self.angle += math.radians(10)
elif victim_angle < self.angle:
self.angle -= math.radians(10)
self.current_dir = Direction.from_angle(self.angle)
super().update_state(field)
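    # zutil.cache_for presumably reuses the previous result for this many calls,
    # so the vision scan below only runs every RECALCULATE_HUMANS_SEEN updates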
@zutil.cache_for(times=RECALCULATE_HUMANS_SEEN)
def humans_in_vision(self, field: World) -> Iterable[Human]:
return [human for human in field.humans
if self.position.distance(human.position) < ZOMBIE_VISION]
def run_to_humans(self, field: World, goto: Point) -> Point:
humans = self.humans_in_vision(field)
bounds = field.bounds
victim, _ = self.closest_to(humans, bounds)
if victim is None:
return goto
span = zutil.span(bounds)
span_mid = span / 2.0
direc = Direction.from_points(self.position, victim.position)
dist = self.position.distance(victim.position)
if dist > span_mid:
dist = span - dist
direc = -direc
factor_dist = float(ZOMBIE_VISION - dist)
goto = Point(int(goto.x + (factor_dist * direc.x)),
int(goto.y + (factor_dist * direc.y)))
return goto
def change_dir(self) -> None:
self.angle = zutil.random_angle_change(self.angle, 10)
self.current_dir = Direction.from_angle(self.angle)
HUMAN_VISION: int = 50
HUMAN_COLOR: pygame.Color = pygame.Color('pink')
HUMAN_ENERGY_LEVEL: float = 4.0
HUMAN_HUNGRY_LEVEL: float = HUMAN_ENERGY_LEVEL / 2
RECALCULATE_ZOMBIES_SEEN: int = 5
class HumanSprite(Actor):
def __init__(self):
super().__init__(HUMAN_COLOR)
self.lifetime: Generator[float, None, None] = self.new_lifetime()
def eat_food(self, food: Food) -> None:
if self.is_hungry():
food.consume()
self.lifetime = self.new_lifetime()
self.change_dir()
def is_hungry(self) -> bool:
return self.energy < HUMAN_HUNGRY_LEVEL
def is_dead(self) -> bool:
return self.energy == 0
def new_lifetime(self) -> Generator[float, None, None]:
return zutil.xfrange(2 + (random.random() * 2), 0, -0.0005)
def alpha(self) -> float:
result = self.energy / 2.0
return min(result, 1)
def update_state(self, field: World) -> None:
self.energy = next(self.lifetime, 0)
if self.is_dead():
self.kill()
return
self.color.a = int(255 * self.alpha())
self.draw_image(self.color)
goto = Point(*self.rect.center)
goto = self.run_from_zombies(field, goto)
goto = self.run_to_food(field, goto)
next_pos = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
go_to_dir = Direction.from_points(Point(*self.rect.center), next_pos)
self.current_dir = go_to_dir
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_ZOMBIES_SEEN)
def zombies_in_vision(self, field: World) -> Iterable[Zombie]:
return [zombie for zombie in field.zombies
if self.position.distance(zombie.position) <= HUMAN_VISION]
def run_from_zombies(self, field: World, goto: Point) -> Point:
span = zutil.span(field.bounds)
span_mid = span / 2.0
for zombie in self.zombies_in_vision(field):
dist = self.position.distance(zombie.position)
rev_dir = False
if dist > span_mid:
dist = span - dist
rev_dir = True
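            # squared falloff: nearby zombies repel much more strongly than ones
            # at the edge of vision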
factor_dist = float(HUMAN_VISION - dist) ** 2
direc = Direction.from_points(self.position, zombie.position)
if not rev_dir:
direc = -direc
goto = Point(goto.x + (factor_dist * direc.x),
goto.y + (factor_dist * direc.y))
return goto
def run_to_food(self, field: World, goto: Point) -> Point:
if self.is_hungry():
span = zutil.span(field.bounds)
span_mid = span / 2.0
food, _ = self.closest_to(field.food, field.bounds)
if food is not None:
direc = Direction.from_points(self.position, food.position)
dist = self.position.distance(food.position)
if dist > span_mid:
direc = -direc
factor = (float(self.energy) / 4 * HUMAN_VISION) ** 2
goto = Point(goto.x + (factor * direc.x), goto.y + (factor * direc.y))
return goto
class Consumable(Entity):
def __init__(self, color: pygame.Color, amount: int = 5):
super().__init__(color)
self.amount: int = amount
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.rect(self.image, color, self.image.get_rect())
def consume(self) -> None:
self.amount -= 1
if not self.has_more():
self.kill()
def has_more(self) -> bool:
return self.amount > 0
FOOD_COLOR: pygame.Color = pygame.Color('green')
DEFAULT_FOOD_AMOUNT: int = 25
class FoodSprite(Consumable):
def __init__(self):
super().__init__(FOOD_COLOR, amount=DEFAULT_FOOD_AMOUNT)
| new_point = pos + self._mouse_offset
self.rect.center = int(new_point.x), int(new_point.y)
self.reset_pos() | identifier_body |
entities.py | """
Created on Dec 7, 2014
@author: bbuxton
"""
import random
import sys
import pygame
import math
from collections.abc import Callable, Iterator
from typing import Generator, Iterable, Optional, Generic, Tuple, TypeVar, Type, cast
import zombiesim.util as zutil
from zombiesim.type_def import Bounds, Direction, Food, Human, HasPosition,\
PointProducer, Point, World, Zombie
SpritePredicate = Callable[[pygame.sprite.Sprite], bool]
EntityCallback = Callable[['Entity'], None]
T = TypeVar('T', bound='Entity')
C = TypeVar('C', bound=HasPosition)
class EntityGroup(pygame.sprite.Group, Generic[T]):
def __init__(self, clazz: type[T]):
super().__init__()
self.entity_class: type[T] = clazz
def create_one(self) -> T:
entity: T = self.entity_class()
self.add(entity)
return entity
def __iter__(self) -> Iterator[T]:
return cast(Iterator[T], super().__iter__())
ENTITY_WIDTH = 10
ENTITY_HEIGHT = 10
class Entity(pygame.sprite.Sprite):
@classmethod
def create_group(cls: Type[T],
size: int,
point_getter: PointProducer) -> EntityGroup[T]:
all_group = EntityGroup[T](cls)
for _ in range(size):
new_entity = all_group.create_one()
pos = point_getter()
new_entity.rect.center = int(pos.x), int(pos.y)
return all_group
def __init__(self, color: pygame.Color = pygame.Color('black')):
super().__init__()
self.color: pygame.Color = color
self._mouse_groups: list[pygame.sprite.AbstractGroup] = []
self.image: pygame.Surface = self.create_image()
self.rect: pygame.rect.Rect = self.image.get_rect()
self.radius: float = min(self.rect.width, self.rect.height) / 2
self.draw_image(self.color) # FIXME
@property
def center(self) -> Tuple[int,int]:
return self.rect.center
@property
def position(self) -> Point:
return Point(*self.center)
def create_image(self) -> pygame.Surface:
image = pygame.Surface(
[ENTITY_WIDTH, ENTITY_HEIGHT], flags=pygame.SRCALPHA)
image.fill(pygame.Color(0, 0, 0, 0))
return image
def draw_image(self, color: pygame.Color) -> None:
pass
def reset_pos(self) -> None:
pass
def pick_up(self, pos: Point) -> None:
groups = self.groups()
self._mouse_groups = []
for group in groups:
group.remove(self)
self._mouse_groups.append(group)
self._mouse_offset = self.position - pos
def update_pick_up(self, pos: Point) -> None:
new_point = pos + self._mouse_offset
self.rect.center = int(new_point.x), int(new_point.y)
self.reset_pos()
def put_down(self, pos: Point):
self.update_pick_up(pos)
for group in self._mouse_groups:
group.add(self)
del self._mouse_groups
del self._mouse_offset
def update(self, *args, **kwargs) -> None:
""" Let's be honest - this is to make the typing system happy"""
self.update_state(args[0])
super().update(*args, **kwargs)
def closest_to(self,
other: Iterable[C],
bounds: Bounds,
to_include: Callable[[C], bool] = lambda _: True) \
-> tuple[Optional[C], float]:
self_rect: Optional[pygame.rect.Rect] = self.rect
if self_rect is None:
return (None, 0.0)
span = zutil.span(bounds)
span_mid = span / 2.0
curmin: float = sys.maxsize | continue
dist = pos.distance(each.position)
if dist > span_mid:
dist = span - dist
if dist < curmin:
curmin = dist
curactor = each
return (curactor, curmin)
def update_state(self, field: World) -> None:
pass
class Actor(Entity):
def __init__(self, color: pygame.Color, default_energy: float = 0.0):
super().__init__(color)
self.energy: float = default_energy
self.change_dir()
@property
def x(self) -> int:
return self.rect.x
@property
def y(self) -> int:
return self.rect.y
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.ellipse(self.image, color, self.image.get_rect())
def update_pos(self, direc: Direction) -> None:
new_x = self.x + (direc.x * self.energy)
new_y = self.y + (direc.y * self.energy)
self.rect.x = int(round(new_x))
self.rect.y = int(round(new_y))
def hit_edge(self, parent_rect: pygame.rect.Rect) -> None:
if self.rect.left < parent_rect.left:
self.rect.right = parent_rect.right
if self.rect.right > parent_rect.right:
self.rect.left = parent_rect.left
if self.rect.top < parent_rect.top:
self.rect.bottom = parent_rect.bottom
if self.rect.bottom > parent_rect.bottom:
self.rect.top = parent_rect.top
def change_dir(self) -> None:
self.current_dir = zutil.random_direction()
def update_state(self, field: World) -> None:
self.update_pos(self.current_dir)
super().update_state(field)
ZOMBIE_VISION: int = 100
ZOMBIE_ATTACK_WAIT_MAX: int = 25
ZOMBIE_COLOR: pygame.Color = pygame.Color('red')
ZOMBIE_ENERGY: float = 2.0
RECALCULATE_HUMANS_SEEN: int = 10
class ZombieSprite(Actor):
def __init__(self):
self.angle = zutil.random_angle()
super().__init__(ZOMBIE_COLOR, ZOMBIE_ENERGY)
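        # random start-up delay; update_state returns early until this reaches zero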
self.attack_wait = random.randint(
int(ZOMBIE_ATTACK_WAIT_MAX / 2), ZOMBIE_ATTACK_WAIT_MAX)
def update_state(self, field: World) -> None:
if self.attack_wait > 0:
self.attack_wait -= 1
return
goto = Point(*self.rect.center)
goto = self.run_to_humans(field, goto)
next_point = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
# TODO Revisit
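        # steer gradually: turn at most 10 degrees per update toward the victim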
victim_angle = Direction.from_points(Point(*self.rect.center), next_point).to_angle()
if victim_angle > self.angle:
self.angle += math.radians(10)
elif victim_angle < self.angle:
self.angle -= math.radians(10)
self.current_dir = Direction.from_angle(self.angle)
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_HUMANS_SEEN)
def humans_in_vision(self, field: World) -> Iterable[Human]:
return [human for human in field.humans
if self.position.distance(human.position) < ZOMBIE_VISION]
def run_to_humans(self, field: World, goto: Point) -> Point:
humans = self.humans_in_vision(field)
bounds = field.bounds
victim, _ = self.closest_to(humans, bounds)
if victim is None:
return goto
span = zutil.span(bounds)
span_mid = span / 2.0
direc = Direction.from_points(self.position, victim.position)
dist = self.position.distance(victim.position)
if dist > span_mid:
dist = span - dist
direc = -direc
factor_dist = float(ZOMBIE_VISION - dist)
goto = Point(int(goto.x + (factor_dist * direc.x)),
int(goto.y + (factor_dist * direc.y)))
return goto
def change_dir(self) -> None:
self.angle = zutil.random_angle_change(self.angle, 10)
self.current_dir = Direction.from_angle(self.angle)
HUMAN_VISION: int = 50
HUMAN_COLOR: pygame.Color = pygame.Color('pink')
HUMAN_ENERGY_LEVEL: float = 4.0
HUMAN_HUNGRY_LEVEL: float = HUMAN_ENERGY_LEVEL / 2
RECALCULATE_ZOMBIES_SEEN: int = 5
class HumanSprite(Actor):
def __init__(self):
super().__init__(HUMAN_COLOR)
self.lifetime: Generator[float, None, None] = self.new_lifetime()
def eat_food(self, food: Food) -> None:
if self.is_hungry():
food.consume()
self.lifetime = self.new_lifetime()
self.change_dir()
def is_hungry(self) -> bool:
return self.energy < HUMAN_HUNGRY_LEVEL
def is_dead(self) -> bool:
return self.energy == 0
def new_lifetime(self) -> Generator[float, None, None]:
return zutil.xfrange(2 + (random.random() * 2), 0, -0.0005)
def alpha(self) -> float:
result = self.energy / 2.0
return min(result, 1)
def update_state(self, field: World) -> None:
self.energy = next(self.lifetime, 0)
if self.is_dead():
self.kill()
return
self.color.a = int(255 * self.alpha())
self.draw_image(self.color)
goto = Point(*self.rect.center)
goto = self.run_from_zombies(field, goto)
goto = self.run_to_food(field, goto)
next_pos = Point(goto.x + self.current_dir.x,
goto.y + self.current_dir.y)
go_to_dir = Direction.from_points(Point(*self.rect.center), next_pos)
self.current_dir = go_to_dir
super().update_state(field)
@zutil.cache_for(times=RECALCULATE_ZOMBIES_SEEN)
def zombies_in_vision(self, field: World) -> Iterable[Zombie]:
return [zombie for zombie in field.zombies
if self.position.distance(zombie.position) <= HUMAN_VISION]
def run_from_zombies(self, field: World, goto: Point) -> Point:
span = zutil.span(field.bounds)
span_mid = span / 2.0
for zombie in self.zombies_in_vision(field):
dist = self.position.distance(zombie.position)
rev_dir = False
if dist > span_mid:
dist = span - dist
rev_dir = True
factor_dist = float(HUMAN_VISION - dist) ** 2
direc = Direction.from_points(self.position, zombie.position)
if not rev_dir:
direc = -direc
goto = Point(goto.x + (factor_dist * direc.x),
goto.y + (factor_dist * direc.y))
return goto
def run_to_food(self, field: World, goto: Point) -> Point:
if self.is_hungry():
span = zutil.span(field.bounds)
span_mid = span / 2.0
food, _ = self.closest_to(field.food, field.bounds)
if food is not None:
direc = Direction.from_points(self.position, food.position)
dist = self.position.distance(food.position)
if dist > span_mid:
direc = -direc
factor = (float(self.energy) / 4 * HUMAN_VISION) ** 2
goto = Point(goto.x + (factor * direc.x), goto.y + (factor * direc.y))
return goto
class Consumable(Entity):
def __init__(self, color: pygame.Color, amount: int = 5):
super().__init__(color)
self.amount: int = amount
def draw_image(self, color: pygame.Color) -> None:
pygame.draw.rect(self.image, color, self.image.get_rect())
def consume(self) -> None:
self.amount -= 1
if not self.has_more():
self.kill()
def has_more(self) -> bool:
return self.amount > 0
FOOD_COLOR: pygame.Color = pygame.Color('green')
DEFAULT_FOOD_AMOUNT: int = 25
class FoodSprite(Consumable):
def __init__(self):
super().__init__(FOOD_COLOR, amount=DEFAULT_FOOD_AMOUNT) | curactor: Optional[C] = None
pos = self.position
for each in other:
if not to_include(each): | random_line_split |
parser.py | from struct import unpack_from
from dbparse import parseReplay
from collections import Counter, deque
import numpy as np
import math
import json
import urllib.request
import sys
import os
from copy import deepcopy
# constants
HITMAP_RESOLUTION = 64
HITMAP_SIZE = 128
TIMING_RESOLUTION = 64
class ModeError(Exception):
def __init__(self, value):
self.mode = value
def __str__(self):
return repr(self.mode)
class HitObject:
x = -1
y = -1
time = -1
lenient = False
def __init__(self, x, y, time, lenient):
self.x = x
self.y = y
self.time = time
self.lenient = lenient
self.tags = []
def add_tag(self, tag):
if tag not in self.tags:
self.tags.append(tag)
def __str__(self):
return '(%d, %d, %d, %s)' % \
(self.time, self.x, self.y, self.tags)
class TimingPoint:
time = -1
mpb = -1
def __init__(self, time, mpb):
self.time = time
self.mpb = mpb
def parse_object(line):
params = line.split(',')
x = float(params[0])
y = float(params[1])
time = int(params[2])
objtype = int(params[3])
# hit circle
if (objtype & 1) != 0:
return HitObject(x, y, time, False)
# sliders
elif (objtype & 2) != 0:
return HitObject(x, y, time, True)
# ignore spinners
else:
return None
"""
Takes a beatmap file as input, and outputs a list of
beatmap objects, sorted by their time offset.
"""
def parse_osu(osu):
objects = []
timing_points = []
beatmap = {}
in_objects = False
in_timings = False
# parse the osu! file
for line in osu:
if 'CircleSize' in line:
beatmap['cs'] = float(line.split(':')[1])
elif 'OverallDifficulty' in line:
beatmap['od'] = float(line.split(':')[1])
elif 'HPDrainRate' in line:
beatmap['hp'] = float(line.split(':')[1])
elif 'ApproachRate' in line:
beatmap['ar'] = float(line.split(':')[1])
elif 'Mode' in line:
mode = int(line.split(':')[1])
if mode != 0:
raise ModeError(mode)
elif 'Title' in line and 'Unicode' not in line:
beatmap['title'] = line.split(':')[1].strip()
beatmap['title_lower'] = beatmap['title'].lower()
elif 'Version' in line:
beatmap['version'] = line.split(':')[1].strip()
beatmap['version_lower'] = beatmap['version'].lower()
elif 'Artist' in line and 'Unicode' not in line:
beatmap['artist'] = line.split(':')[1].strip()
beatmap['artist_lower'] = beatmap['artist'].lower()
elif 'Creator' in line:
beatmap['creator'] = line.split(':')[1].strip()
beatmap['creator_lower'] = beatmap['creator'].lower()
elif 'BeatmapID' in line:
beatmap['beatmap_id'] = line.split(':')[1].strip()
elif 'BeatmapSetID' in line:
beatmap['beatmap_set_id'] = line.split(':')[1].strip()
elif '[TimingPoints]' in line:
in_timings = True
elif in_timings:
if line.strip() == '':
in_timings = False
continue
args = line.split(',')
time = float(args[0])
mpb = float(args[1])
if mpb > 0:
pt = TimingPoint(time, mpb)
timing_points.append(pt)
if '[HitObjects]' in line:
in_objects = True
elif in_objects:
obj = parse_object(line)
if obj != None:
objects.append(obj)
# find streams
for i in range(len(objects) - 1):
obj0 = objects[i]
obj1 = objects[i+1]
# get current mpb
mpb = -1
for t in timing_points:
mpb = t.mpb
if obj0.time >= t.time:
break
timing_diff = obj1.time - obj0.time
# print(str(timing_diff) + ' ' + str(mpb/4 + 10))
if timing_diff < mpb/4.0 + 10.0:
obj0.add_tag('stream')
obj1.add_tag('stream')
return (objects, beatmap)
# get the timing window for a note with the given OD and mods
def timing_window(od, hd, ez):
mod_od = od
if ez:
mod_od = 0.5 * od
elif hd:
mod_od = min(1.4 * od, 10)
w300 = 79.5 - 6.0 * mod_od
w100 = 139.5 - 8.0 * mod_od
w50 = 199.5 - 10.0 * mod_od
return (w300, w100, w50)
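# e.g. OD 10 with no mods gives (19.5, 59.5, 99.5): a press within that many
# milliseconds of the object time scores a 300/100/50 respectively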
def in_window(obj, time, window):
return obj.time - window[2] <= time and \
obj.time + window[2] >= time
def pushed_buttons(prev_input, cur_input):
|
def circle_radius(cs, hd, ez):
mod_cs = cs
if hd:
mod_cs *= 1.3
elif ez:
mod_cs /= 2
return (104.0 - mod_cs * 8.0) / 2.0
def dist(p_input, obj):
return math.sqrt(math.pow(p_input['x'] - obj.x, 2) + \
math.pow(p_input['y'] - obj.y, 2))
def score_hit(time, obj, window):
if obj.lenient and abs(time - obj.time) <= window[2]:
return '300'
if abs(time - obj.time) <= window[0]:
return '300'
elif abs(time - obj.time) <= window[1]:
return '100'
elif abs(time - obj.time) <= window[2]:
return '50'
return 'welp'
def transform_coords(cur_input, prev_obj, cur_obj):
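    # rotate the hit offset so that the direction of travel (previous object ->
    # current object) points straight up in the hitmap; offsets along the
    # approach direction end up vertical, sideways aim errors end up horizontal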
dx = cur_input['x'] - cur_obj.x
dy = cur_input['y'] - cur_obj.y
theta = math.pi / 2.0
if prev_obj != None:
thetaprime = math.atan2(cur_obj.y - prev_obj.y, cur_obj.x - prev_obj.x)
theta = math.pi / 2.0 - thetaprime
# get the rotation matrix
a = math.cos(theta)
b = math.sin(theta)
R = np.matrix([[a, -b], [b, a]])
# apply the rotation matrix to the coordinates
coords = np.ravel(R * np.matrix([[dx], [dy]]))
# remap to hitmap pixel coordinates
coords += HITMAP_SIZE / 2
# one last remapping to hitmap index
xi = int(coords[0] / HITMAP_SIZE * HITMAP_RESOLUTION)
yi = int(coords[1] / HITMAP_SIZE * HITMAP_RESOLUTION)
return(xi, yi)
"""
Simulates the game, collecting statistics on the way.
"""
def simulate(objects, difficulty, replay):
mods = replay['mods']
WINDOW = timing_window(difficulty['od'], mods['hard_rock'], mods['easy'])
RADIUS = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
replay_data = replay['replay_data']
end_time = max([objects[-1].time, replay_data[-1]['time']])
difficulty['length'] = objects[-1].time
# for o in replay_data:
# if o['time'] > 49500 and o['time'] < 49700:
# print(o)
# iteration variables
inputs = deque(replay_data)
objects = deque(objects)
cur_input = {'time': -1, 'keys': \
{'M1': False, 'M2': False, 'K1': False, 'K2': False}}
prev_obj = None
cur_obj = None
marked = False
# stats variables
timeline = []
keys = {'M1': 0, 'M2': 0, 'K1': 0, 'K2': 0}
hitmap = np.zeros((HITMAP_RESOLUTION, HITMAP_RESOLUTION))
timings = np.zeros(TIMING_RESOLUTION)
stream_num = 0
stream_timings = []
all_timings = []
extra_inputs = []
missed_notes = []
# first, reverse y axis if hr
if mods['hard_rock']:
for o in objects:
o.y = 384 - o.y
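    # main loop: step through the replay one millisecond at a time, consuming
    # inputs and hit objects from their deques as their timestamps come up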
for time in range(end_time):
# check if input advances
if len(inputs) > 0:
next_input = inputs[0]
if time > next_input['time']:
prev_input = cur_input
cur_input = inputs.popleft()
# check if player pushed a button
buttons = pushed_buttons(prev_input, cur_input)
if len(buttons) > 0:
# add the pressed key to stats
for k in buttons:
keys[k] += 1
# check if player hit current hitobject
if cur_obj != None and dist(cur_input, cur_obj) < RADIUS:
# it's a hit!
score_val = score_hit(time, cur_obj, WINDOW)
time_diff = time - cur_obj.time
# if cur_obj.time > 10000 and cur_obj.time < 11000:
# print('%d - %d' % (cur_input['time'], cur_obj.time))
# get the x and y hitmap coords
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
hitmap[yi][xi] += 1
# get the timing bucket
bucket = int(time_diff / (WINDOW[2] * 2) * \
TIMING_RESOLUTION) + int(TIMING_RESOLUTION / 2)
if bucket >= 0 and bucket < len(timings):
timings[bucket] += 1
all_timings.append(time_diff)
# if it's a stream, record the timing
if 'stream' in cur_obj.tags:
if stream_num >= len(stream_timings):
stream_timings.append([])
stream_timings[stream_num].append(time_diff)
stream_num += 1
else:
stream_num = 0
# if the scoreval is 100 or 50, add it to the timeline
if score_val == '100' or score_val == '50':
timeline.append({ \
't': time, \
'event': score_val, \
'timing': time_diff, \
'xi': xi, \
'yi': yi
})
prev_obj = cur_obj
cur_obj = None
else:
# wasted a click
extra_inputs.append(cur_input)
# hit object expires
if cur_obj != None and time > cur_obj.time + WINDOW[2]:
event = { \
't': cur_obj.time, \
'event': 'miss', \
'timing': 0, \
'xi': -1, \
'yi': -1 \
}
timeline.append(event)
missed_notes.append({
'prev': prev_obj, \
'cur': cur_obj, \
'event': event \
})
prev_obj = cur_obj
cur_obj = None
# pop in the next object if there's a vacancy
if len(objects) > 0:
next_obj = objects[0]
if cur_obj == None and in_window(next_obj, time, WINDOW):
cur_obj = objects.popleft()
# try to match up missed notes to nearest hit attempts
for note in missed_notes:
cur_obj = note['cur']
prev_obj = note['prev']
event = note['event']
for cur_input in extra_inputs:
if in_window(cur_obj, cur_input['time'], WINDOW):
# print('Paired (%f, %f) -> (%d, %f, %f) with (%d, %f, %f)' % (prev_obj.x, prev_obj.y, cur_obj.time, cur_obj.x, cur_obj.y, cur_input['time'], cur_input['x'], cur_input['y']))
# print('%f > %f' % (dist(cur_input, cur_obj), RADIUS))
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
# print('(%d, %d)' % (xi, yi))
time_diff = cur_input['time'] - cur_obj.time
event['timing'] = time_diff
event['xi'] = xi
event['yi'] = yi
# done parsing! now to format the json
# get streaming averages
stream_avg = [sum(l) / len(l) for l in stream_timings]
# get unstable rate
unstable_rate = np.std(all_timings) * 10
result = deepcopy(replay)
result.pop('replay_data')
result['timeline'] = timeline
result['keys'] = dict(keys)
result['hitmap'] = [int(i) for i in np.ravel(hitmap).tolist()]
result['hitmap_resolution'] = HITMAP_RESOLUTION
result['hitmap_size'] = HITMAP_SIZE
result['circle_size'] = RADIUS
result['timings'] = [int(i) for i in timings.tolist()]
result['stream_timings'] = stream_avg
result['unstable_rate'] = unstable_rate
result['beatmap'] = difficulty
return result
def plot_hitmap(hitmap, difficulty, mods):
    import matplotlib.pyplot as plt
    res = len(hitmap)
    csr = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
fig, axis = plt.subplots()
heatmap = axis.pcolor(hitmap, cmap=plt.cm.viridis, alpha=1.0)
circle = plt.Circle((HITMAP_RESOLUTION/2, HITMAP_RESOLUTION/2), \
csr/HITMAP_SIZE*HITMAP_RESOLUTION, color='red', fill=False)
fig.gca().add_artist(circle);
axis.set_aspect('equal')
plt.xlim(0, res)
plt.ylim(0, res)
plt.show();
def get_beatmap_id(bm_hash):
# api call to find the beatmap id
apiurl = 'https://osu.ppy.sh/api/get_beatmaps?'
key = open('apikey').read().strip()
url = apiurl + 'k=' + key + '&h=' + bm_hash
try:
response = urllib.request.urlopen(url)
except urllib.error.URLError:
return None
res = str(response.read(), 'utf-8')
jsonRes = json.loads(res)
res = jsonRes[0]
return (res['beatmap_id'], res['beatmapset_id'], res['difficultyrating'])
if __name__ == '__main__':
# bm_file = 'data/granat.osu'
# rp_file = 'data/granat_extra.osr'
# bm_file = 'data/junshin_always.osu'
# rp_file = 'data/junshin_always_colorful.osr'
# bm_file = 'data/darling_insane.osu'
# rp_file = 'data/darling_insane.osr'
rp_file = sys.argv[1]
replay = parseReplay(open(rp_file, 'rb').read())
if replay['mode'] != 0:
print(json.dumps({'error': 'Unsupported game mode.'}))
sys.exit(0)
# attempt to locate beatmap file in /data
bm_hash = replay['beatmap_md5']
bm_path = 'data/' + bm_hash + '.osu'
bm_file = None
bm_tuple = get_beatmap_id(bm_hash)
if bm_tuple == None:
print(json.dumps({'error': 'Could not access the osu! api at this time. Please try again in a bit.'}))
sys.exit(0);
bm_id, bm_set_id, sd = bm_tuple
if os.path.isfile(bm_path):
bm_file = open(bm_path)
else:
# download the beatmap file to the local file system
if bm_id != None:
urllib.request.urlretrieve('https://osu.ppy.sh/osu/' + bm_id, bm_path)
bm_file = open(bm_path)
    if bm_file is None:
        print(json.dumps({'error': 'Invalid beatmap hash: beatmap not found'}))
        sys.exit(0)
replay_file = open(rp_file, 'rb').read()
objects, beatmap = parse_osu(bm_file)
beatmap['beatmap_id'] = bm_id
beatmap['beatmap_set_id'] = bm_set_id
beatmap['beatmap_md5'] = bm_hash
beatmap['sd'] = sd
results = simulate(objects, beatmap, replay)
print(json.dumps(results))
    # plot_hitmap(np.reshape(results['hitmap'], (HITMAP_RESOLUTION, HITMAP_RESOLUTION)), beatmap, replay['mods'])
| buttons = []
for k in ['K1', 'K2', 'M1', 'M2']:
if cur_input['keys'][k] and not prev_input['keys'][k]:
buttons.append(k)
return buttons | identifier_body |
parser.py | from struct import unpack_from
from dbparse import parseReplay
from collections import Counter, deque
import numpy as np
import math
import json
import urllib.request
import sys
import os
from copy import deepcopy
# constants
HITMAP_RESOLUTION = 64
HITMAP_SIZE = 128
TIMING_RESOLUTION = 64
class ModeError(Exception):
def __init__(self, value):
self.mode = value
def __str__(self):
return repr(self.mode)
class HitObject:
x = -1
y = -1
time = -1
lenient = False
def __init__(self, x, y, time, lenient):
self.x = x
self.y = y
self.time = time
self.lenient = lenient
self.tags = []
def add_tag(self, tag):
if tag not in self.tags:
self.tags.append(tag)
def __str__(self):
return '(%d, %d, %d, %s)' % \
(self.time, self.x, self.y, self.tags)
class TimingPoint:
time = -1
mpb = -1
def __init__(self, time, mpb):
self.time = time
self.mpb = mpb
def parse_object(line):
params = line.split(',')
x = float(params[0])
y = float(params[1])
time = int(params[2])
objtype = int(params[3])
# hit circle
if (objtype & 1) != 0:
return HitObject(x, y, time, False)
# sliders
elif (objtype & 2) != 0:
return HitObject(x, y, time, True)
# ignore spinners
else:
return None
"""
Takes a beatmap file as input, and outputs a list of
beatmap objects, sorted by their time offset.
"""
def parse_osu(osu):
objects = []
timing_points = []
beatmap = {}
in_objects = False
in_timings = False
# parse the osu! file
for line in osu:
if 'CircleSize' in line:
beatmap['cs'] = float(line.split(':')[1])
elif 'OverallDifficulty' in line:
beatmap['od'] = float(line.split(':')[1])
elif 'HPDrainRate' in line:
beatmap['hp'] = float(line.split(':')[1])
elif 'ApproachRate' in line:
beatmap['ar'] = float(line.split(':')[1])
elif 'Mode' in line:
mode = int(line.split(':')[1])
if mode != 0:
raise ModeError(mode)
elif 'Title' in line and 'Unicode' not in line:
beatmap['title'] = line.split(':')[1].strip()
beatmap['title_lower'] = beatmap['title'].lower()
elif 'Version' in line:
beatmap['version'] = line.split(':')[1].strip()
beatmap['version_lower'] = beatmap['version'].lower()
elif 'Artist' in line and 'Unicode' not in line:
beatmap['artist'] = line.split(':')[1].strip()
beatmap['artist_lower'] = beatmap['artist'].lower()
elif 'Creator' in line:
beatmap['creator'] = line.split(':')[1].strip()
beatmap['creator_lower'] = beatmap['creator'].lower()
elif 'BeatmapID' in line:
beatmap['beatmap_id'] = line.split(':')[1].strip()
elif 'BeatmapSetID' in line:
beatmap['beatmap_set_id'] = line.split(':')[1].strip()
elif '[TimingPoints]' in line:
in_timings = True
elif in_timings:
if line.strip() == '':
in_timings = False
continue
args = line.split(',')
time = float(args[0])
mpb = float(args[1])
if mpb > 0:
pt = TimingPoint(time, mpb)
timing_points.append(pt)
if '[HitObjects]' in line:
in_objects = True
elif in_objects:
obj = parse_object(line)
if obj != None:
objects.append(obj)
# find streams
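    # consecutive objects less than a quarter of a beat (plus 10 ms of slack)
    # apart are tagged as part of a stream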
for i in range(len(objects) - 1):
obj0 = objects[i]
obj1 = objects[i+1]
# get current mpb
mpb = -1
for t in timing_points:
mpb = t.mpb
if obj0.time >= t.time:
break
timing_diff = obj1.time - obj0.time
# print(str(timing_diff) + ' ' + str(mpb/4 + 10))
if timing_diff < mpb/4.0 + 10.0:
obj0.add_tag('stream')
obj1.add_tag('stream')
return (objects, beatmap)
# get the timing window for a note with the given OD and mods
def timing_window(od, hd, ez):
mod_od = od
if ez:
mod_od = 0.5 * od
elif hd:
mod_od = min(1.4 * od, 10)
w300 = 79.5 - 6.0 * mod_od
w100 = 139.5 - 8.0 * mod_od
w50 = 199.5 - 10.0 * mod_od
return (w300, w100, w50)
def in_window(obj, time, window):
return obj.time - window[2] <= time and \
obj.time + window[2] >= time
def pushed_buttons(prev_input, cur_input):
buttons = []
for k in ['K1', 'K2', 'M1', 'M2']:
if cur_input['keys'][k] and not prev_input['keys'][k]:
buttons.append(k)
return buttons
def circle_radius(cs, hd, ez):
mod_cs = cs
if hd:
mod_cs *= 1.3
elif ez:
mod_cs /= 2
return (104.0 - mod_cs * 8.0) / 2.0
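# e.g. CS 4 with no mods gives a radius of (104 - 32) / 2 = 36 osu!pixels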
def dist(p_input, obj):
return math.sqrt(math.pow(p_input['x'] - obj.x, 2) + \
math.pow(p_input['y'] - obj.y, 2))
def score_hit(time, obj, window):
if obj.lenient and abs(time - obj.time) <= window[2]:
return '300'
if abs(time - obj.time) <= window[0]:
return '300'
elif abs(time - obj.time) <= window[1]:
return '100'
elif abs(time - obj.time) <= window[2]:
return '50'
return 'welp'
def transform_coords(cur_input, prev_obj, cur_obj):
dx = cur_input['x'] - cur_obj.x
dy = cur_input['y'] - cur_obj.y
theta = math.pi / 2.0
if prev_obj != None:
thetaprime = math.atan2(cur_obj.y - prev_obj.y, cur_obj.x - prev_obj.x)
theta = math.pi / 2.0 - thetaprime
# get the rotation matrix
a = math.cos(theta)
b = math.sin(theta)
R = np.matrix([[a, -b], [b, a]])
# apply the rotation matrix to the coordinates
coords = np.ravel(R * np.matrix([[dx], [dy]]))
# remap to hitmap pixel coordinates
coords += HITMAP_SIZE / 2
# one last remapping to hitmap index
xi = int(coords[0] / HITMAP_SIZE * HITMAP_RESOLUTION)
yi = int(coords[1] / HITMAP_SIZE * HITMAP_RESOLUTION)
return(xi, yi)
"""
Simulates the game, collecting statistics on the way.
"""
def simulate(objects, difficulty, replay):
mods = replay['mods']
WINDOW = timing_window(difficulty['od'], mods['hard_rock'], mods['easy'])
RADIUS = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
replay_data = replay['replay_data']
end_time = max([objects[-1].time, replay_data[-1]['time']])
difficulty['length'] = objects[-1].time
# for o in replay_data:
# if o['time'] > 49500 and o['time'] < 49700:
# print(o)
# iteration variables
inputs = deque(replay_data)
objects = deque(objects)
cur_input = {'time': -1, 'keys': \
{'M1': False, 'M2': False, 'K1': False, 'K2': False}}
prev_obj = None
cur_obj = None
marked = False
# stats variables
timeline = []
keys = {'M1': 0, 'M2': 0, 'K1': 0, 'K2': 0}
hitmap = np.zeros((HITMAP_RESOLUTION, HITMAP_RESOLUTION))
timings = np.zeros(TIMING_RESOLUTION)
stream_num = 0
stream_timings = []
all_timings = []
extra_inputs = []
missed_notes = []
# first, reverse y axis if hr
if mods['hard_rock']:
for o in objects:
o.y = 384 - o.y
for time in range(end_time):
# check if input advances
if len(inputs) > 0:
next_input = inputs[0]
if time > next_input['time']:
prev_input = cur_input
cur_input = inputs.popleft()
# check if player pushed a button
buttons = pushed_buttons(prev_input, cur_input)
if len(buttons) > 0:
# add the pressed key to stats
for k in buttons:
keys[k] += 1
# check if player hit current hitobject
if cur_obj != None and dist(cur_input, cur_obj) < RADIUS:
# it's a hit!
score_val = score_hit(time, cur_obj, WINDOW)
time_diff = time - cur_obj.time
# if cur_obj.time > 10000 and cur_obj.time < 11000:
# print('%d - %d' % (cur_input['time'], cur_obj.time))
# get the x and y hitmap coords
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
hitmap[yi][xi] += 1
# get the timing bucket
bucket = int(time_diff / (WINDOW[2] * 2) * \
TIMING_RESOLUTION) + int(TIMING_RESOLUTION / 2)
if bucket >= 0 and bucket < len(timings):
timings[bucket] += 1
all_timings.append(time_diff)
# if it's a stream, record the timing
if 'stream' in cur_obj.tags:
if stream_num >= len(stream_timings):
stream_timings.append([])
stream_timings[stream_num].append(time_diff)
stream_num += 1
else:
stream_num = 0
# if the scoreval is 100 or 50, add it to the timeline
if score_val == '100' or score_val == '50':
timeline.append({ \
't': time, \
'event': score_val, \
'timing': time_diff, \
'xi': xi, \
'yi': yi
})
prev_obj = cur_obj
cur_obj = None
else:
# wasted a click
extra_inputs.append(cur_input)
# hit object expires
if cur_obj != None and time > cur_obj.time + WINDOW[2]:
event = { \
't': cur_obj.time, \
'event': 'miss', \
'timing': 0, \
'xi': -1, \
'yi': -1 \
}
timeline.append(event)
missed_notes.append({
'prev': prev_obj, \
'cur': cur_obj, \
'event': event \
})
prev_obj = cur_obj
cur_obj = None
# pop in the next object if there's a vacancy
if len(objects) > 0:
next_obj = objects[0]
if cur_obj == None and in_window(next_obj, time, WINDOW):
cur_obj = objects.popleft()
# try to match up missed notes to nearest hit attempts
for note in missed_notes:
cur_obj = note['cur']
prev_obj = note['prev']
event = note['event']
for cur_input in extra_inputs:
if in_window(cur_obj, cur_input['time'], WINDOW):
# print('Paired (%f, %f) -> (%d, %f, %f) with (%d, %f, %f)' % (prev_obj.x, prev_obj.y, cur_obj.time, cur_obj.x, cur_obj.y, cur_input['time'], cur_input['x'], cur_input['y']))
# print('%f > %f' % (dist(cur_input, cur_obj), RADIUS))
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
# print('(%d, %d)' % (xi, yi))
time_diff = cur_input['time'] - cur_obj.time
event['timing'] = time_diff
event['xi'] = xi
event['yi'] = yi
# done parsing! now to format the json
# get streaming averages
stream_avg = [sum(l) / len(l) for l in stream_timings]
# get unstable rate
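    # ten times the standard deviation of all hit errors (in ms), which is the
    # conventional definition of unstable rate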
unstable_rate = np.std(all_timings) * 10
result = deepcopy(replay)
result.pop('replay_data')
result['timeline'] = timeline
result['keys'] = dict(keys)
result['hitmap'] = [int(i) for i in np.ravel(hitmap).tolist()]
result['hitmap_resolution'] = HITMAP_RESOLUTION
result['hitmap_size'] = HITMAP_SIZE
result['circle_size'] = RADIUS
result['timings'] = [int(i) for i in timings.tolist()]
result['stream_timings'] = stream_avg
result['unstable_rate'] = unstable_rate
result['beatmap'] = difficulty
return result
def plot_hitmap(hitmap, difficulty, mods):
    import matplotlib.pyplot as plt
    res = len(hitmap)
    csr = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
fig, axis = plt.subplots()
heatmap = axis.pcolor(hitmap, cmap=plt.cm.viridis, alpha=1.0)
circle = plt.Circle((HITMAP_RESOLUTION/2, HITMAP_RESOLUTION/2), \
csr/HITMAP_SIZE*HITMAP_RESOLUTION, color='red', fill=False)
fig.gca().add_artist(circle);
axis.set_aspect('equal')
plt.xlim(0, res)
plt.ylim(0, res)
plt.show();
def get_beatmap_id(bm_hash):
# api call to find the beatmap id
apiurl = 'https://osu.ppy.sh/api/get_beatmaps?'
key = open('apikey').read().strip()
url = apiurl + 'k=' + key + '&h=' + bm_hash
try:
response = urllib.request.urlopen(url)
except urllib.error.URLError:
return None
res = str(response.read(), 'utf-8')
jsonRes = json.loads(res)
res = jsonRes[0]
return (res['beatmap_id'], res['beatmapset_id'], res['difficultyrating'])
if __name__ == '__main__':
# bm_file = 'data/granat.osu'
# rp_file = 'data/granat_extra.osr'
# bm_file = 'data/junshin_always.osu'
# rp_file = 'data/junshin_always_colorful.osr'
# bm_file = 'data/darling_insane.osu'
# rp_file = 'data/darling_insane.osr'
rp_file = sys.argv[1]
replay = parseReplay(open(rp_file, 'rb').read())
if replay['mode'] != 0:
|
# attempt to locate beatmap file in /data
bm_hash = replay['beatmap_md5']
bm_path = 'data/' + bm_hash + '.osu'
bm_file = None
bm_tuple = get_beatmap_id(bm_hash)
if bm_tuple == None:
print(json.dumps({'error': 'Could not access the osu! api at this time. Please try again in a bit.'}))
sys.exit(0);
bm_id, bm_set_id, sd = bm_tuple
if os.path.isfile(bm_path):
bm_file = open(bm_path)
else:
# download the beatmap file to the local file system
if bm_id != None:
urllib.request.urlretrieve('https://osu.ppy.sh/osu/' + bm_id, bm_path)
bm_file = open(bm_path)
    if bm_file is None:
        print(json.dumps({'error': 'Invalid beatmap hash: beatmap not found'}))
        sys.exit(0)
replay_file = open(rp_file, 'rb').read()
objects, beatmap = parse_osu(bm_file)
beatmap['beatmap_id'] = bm_id
beatmap['beatmap_set_id'] = bm_set_id
beatmap['beatmap_md5'] = bm_hash
beatmap['sd'] = sd
results = simulate(objects, beatmap, replay)
print(json.dumps(results))
    # plot_hitmap(np.reshape(results['hitmap'], (HITMAP_RESOLUTION, HITMAP_RESOLUTION)), beatmap, replay['mods'])
| print(json.dumps({'error': 'Unsupported game mode.'}))
sys.exit(0) | conditional_block |
parser.py | from struct import unpack_from
from dbparse import parseReplay
from collections import Counter, deque
import numpy as np
import math
import json
import urllib.request
import sys
import os
from copy import deepcopy
# constants
HITMAP_RESOLUTION = 64
HITMAP_SIZE = 128
TIMING_RESOLUTION = 64
class ModeError(Exception):
def __init__(self, value):
self.mode = value
def __str__(self):
return repr(self.mode)
class HitObject:
x = -1
y = -1
time = -1
lenient = False
def __init__(self, x, y, time, lenient):
self.x = x
self.y = y
self.time = time
self.lenient = lenient
self.tags = []
def add_tag(self, tag):
if tag not in self.tags:
self.tags.append(tag)
def __str__(self):
return '(%d, %d, %d, %s)' % \
(self.time, self.x, self.y, self.tags) |
def __init__(self, time, mpb):
self.time = time
self.mpb = mpb
def parse_object(line):
params = line.split(',')
x = float(params[0])
y = float(params[1])
time = int(params[2])
objtype = int(params[3])
# hit circle
if (objtype & 1) != 0:
return HitObject(x, y, time, False)
# sliders
elif (objtype & 2) != 0:
return HitObject(x, y, time, True)
# ignore spinners
else:
return None
"""
Takes a beatmap file as input, and outputs a list of
beatmap objects, sorted by their time offset.
"""
def parse_osu(osu):
objects = []
timing_points = []
beatmap = {}
in_objects = False
in_timings = False
# parse the osu! file
for line in osu:
if 'CircleSize' in line:
beatmap['cs'] = float(line.split(':')[1])
elif 'OverallDifficulty' in line:
beatmap['od'] = float(line.split(':')[1])
elif 'HPDrainRate' in line:
beatmap['hp'] = float(line.split(':')[1])
elif 'ApproachRate' in line:
beatmap['ar'] = float(line.split(':')[1])
elif 'Mode' in line:
mode = int(line.split(':')[1])
if mode != 0:
raise ModeError(mode)
elif 'Title' in line and 'Unicode' not in line:
beatmap['title'] = line.split(':')[1].strip()
beatmap['title_lower'] = beatmap['title'].lower()
elif 'Version' in line:
beatmap['version'] = line.split(':')[1].strip()
beatmap['version_lower'] = beatmap['version'].lower()
elif 'Artist' in line and 'Unicode' not in line:
beatmap['artist'] = line.split(':')[1].strip()
beatmap['artist_lower'] = beatmap['artist'].lower()
elif 'Creator' in line:
beatmap['creator'] = line.split(':')[1].strip()
beatmap['creator_lower'] = beatmap['creator'].lower()
elif 'BeatmapID' in line:
beatmap['beatmap_id'] = line.split(':')[1].strip()
elif 'BeatmapSetID' in line:
beatmap['beatmap_set_id'] = line.split(':')[1].strip()
elif '[TimingPoints]' in line:
in_timings = True
elif in_timings:
if line.strip() == '':
in_timings = False
continue
args = line.split(',')
time = float(args[0])
mpb = float(args[1])
if mpb > 0:
pt = TimingPoint(time, mpb)
timing_points.append(pt)
if '[HitObjects]' in line:
in_objects = True
elif in_objects:
obj = parse_object(line)
if obj != None:
objects.append(obj)
# find streams
for i in range(len(objects) - 1):
obj0 = objects[i]
obj1 = objects[i+1]
# get current mpb
mpb = -1
for t in timing_points:
mpb = t.mpb
if obj0.time >= t.time:
break
timing_diff = obj1.time - obj0.time
# print(str(timing_diff) + ' ' + str(mpb/4 + 10))
if timing_diff < mpb/4.0 + 10.0:
obj0.add_tag('stream')
obj1.add_tag('stream')
return (objects, beatmap)
# get the timing window for a note with the given OD and mods
def timing_window(od, hd, ez):
mod_od = od
if ez:
mod_od = 0.5 * od
elif hd:
mod_od = min(1.4 * od, 10)
w300 = 79.5 - 6.0 * mod_od
w100 = 139.5 - 8.0 * mod_od
w50 = 199.5 - 10.0 * mod_od
return (w300, w100, w50)
def in_window(obj, time, window):
return obj.time - window[2] <= time and \
obj.time + window[2] >= time
def pushed_buttons(prev_input, cur_input):
buttons = []
for k in ['K1', 'K2', 'M1', 'M2']:
if cur_input['keys'][k] and not prev_input['keys'][k]:
buttons.append(k)
return buttons
def circle_radius(cs, hd, ez):
mod_cs = cs
if hd:
mod_cs *= 1.3
elif ez:
mod_cs /= 2
return (104.0 - mod_cs * 8.0) / 2.0
def dist(p_input, obj):
return math.sqrt(math.pow(p_input['x'] - obj.x, 2) + \
math.pow(p_input['y'] - obj.y, 2))
def score_hit(time, obj, window):
if obj.lenient and abs(time - obj.time) <= window[2]:
return '300'
if abs(time - obj.time) <= window[0]:
return '300'
elif abs(time - obj.time) <= window[1]:
return '100'
elif abs(time - obj.time) <= window[2]:
return '50'
return 'welp'
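# transform_coords (below) rotates each hit offset so the direction of travel from the
# previous object points "up": theta = pi/2 - atan2(dy, dx) maps the prev->current vector
# onto the +y axis, which makes the aggregated hitmap independent of the direction a
# pattern was approached from. The first object (no previous) keeps a fixed pi/2 rotation.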
def transform_coords(cur_input, prev_obj, cur_obj):
dx = cur_input['x'] - cur_obj.x
dy = cur_input['y'] - cur_obj.y
theta = math.pi / 2.0
if prev_obj != None:
thetaprime = math.atan2(cur_obj.y - prev_obj.y, cur_obj.x - prev_obj.x)
theta = math.pi / 2.0 - thetaprime
# get the rotation matrix
a = math.cos(theta)
b = math.sin(theta)
R = np.matrix([[a, -b], [b, a]])
# apply the rotation matrix to the coordinates
coords = np.ravel(R * np.matrix([[dx], [dy]]))
# remap to hitmap pixel coordinates
coords += HITMAP_SIZE / 2
# one last remapping to hitmap index
xi = int(coords[0] / HITMAP_SIZE * HITMAP_RESOLUTION)
yi = int(coords[1] / HITMAP_SIZE * HITMAP_RESOLUTION)
return(xi, yi)
"""
Simulates the game, collecting statistics on the way.
"""
def simulate(objects, difficulty, replay):
mods = replay['mods']
WINDOW = timing_window(difficulty['od'], mods['hard_rock'], mods['easy'])
RADIUS = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
replay_data = replay['replay_data']
end_time = max([objects[-1].time, replay_data[-1]['time']])
difficulty['length'] = objects[-1].time
# for o in replay_data:
# if o['time'] > 49500 and o['time'] < 49700:
# print(o)
# iteration variables
inputs = deque(replay_data)
objects = deque(objects)
cur_input = {'time': -1, 'keys': \
{'M1': False, 'M2': False, 'K1': False, 'K2': False}}
prev_obj = None
cur_obj = None
marked = False
# stats variables
timeline = []
keys = {'M1': 0, 'M2': 0, 'K1': 0, 'K2': 0}
hitmap = np.zeros((HITMAP_RESOLUTION, HITMAP_RESOLUTION))
timings = np.zeros(TIMING_RESOLUTION)
stream_num = 0
stream_timings = []
all_timings = []
extra_inputs = []
missed_notes = []
# first, reverse y axis if hr
if mods['hard_rock']:
for o in objects:
o.y = 384 - o.y
for time in range(end_time):
# check if input advances
if len(inputs) > 0:
next_input = inputs[0]
if time > next_input['time']:
prev_input = cur_input
cur_input = inputs.popleft()
# check if player pushed a button
buttons = pushed_buttons(prev_input, cur_input)
if len(buttons) > 0:
# add the pressed key to stats
for k in buttons:
keys[k] += 1
# check if player hit current hitobject
if cur_obj != None and dist(cur_input, cur_obj) < RADIUS:
# it's a hit!
score_val = score_hit(time, cur_obj, WINDOW)
time_diff = time - cur_obj.time
# if cur_obj.time > 10000 and cur_obj.time < 11000:
# print('%d - %d' % (cur_input['time'], cur_obj.time))
# get the x and y hitmap coords
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
hitmap[yi][xi] += 1
# get the timing bucket
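# Maps time_diff from [-w50, +w50) onto [0, TIMING_RESOLUTION): dividing by the full
# 2*w50 width and adding TIMING_RESOLUTION/2 puts a perfectly timed hit in the middle
# bucket; the bounds check below drops anything outside the window.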
bucket = int(time_diff / (WINDOW[2] * 2) * \
TIMING_RESOLUTION) + int(TIMING_RESOLUTION / 2)
if bucket >= 0 and bucket < len(timings):
timings[bucket] += 1
all_timings.append(time_diff)
# if it's a stream, record the timing
if 'stream' in cur_obj.tags:
if stream_num >= len(stream_timings):
stream_timings.append([])
stream_timings[stream_num].append(time_diff)
stream_num += 1
else:
stream_num = 0
# if the scoreval is 100 or 50, add it to the timeline
if score_val == '100' or score_val == '50':
timeline.append({ \
't': time, \
'event': score_val, \
'timing': time_diff, \
'xi': xi, \
'yi': yi
})
prev_obj = cur_obj
cur_obj = None
else:
# wasted a click
extra_inputs.append(cur_input)
# hit object expires
if cur_obj != None and time > cur_obj.time + WINDOW[2]:
event = { \
't': cur_obj.time, \
'event': 'miss', \
'timing': 0, \
'xi': -1, \
'yi': -1 \
}
timeline.append(event)
missed_notes.append({
'prev': prev_obj, \
'cur': cur_obj, \
'event': event \
})
prev_obj = cur_obj
cur_obj = None
# pop in the next object if there's a vacancy
if len(objects) > 0:
next_obj = objects[0]
if cur_obj == None and in_window(next_obj, time, WINDOW):
cur_obj = objects.popleft()
# try to match up missed notes to nearest hit attempts
for note in missed_notes:
cur_obj = note['cur']
prev_obj = note['prev']
event = note['event']
for cur_input in extra_inputs:
if in_window(cur_obj, cur_input['time'], WINDOW):
# print('Paired (%f, %f) -> (%d, %f, %f) with (%d, %f, %f)' % (prev_obj.x, prev_obj.y, cur_obj.time, cur_obj.x, cur_obj.y, cur_input['time'], cur_input['x'], cur_input['y']))
# print('%f > %f' % (dist(cur_input, cur_obj), RADIUS))
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
# print('(%d, %d)' % (xi, yi))
time_diff = cur_input['time'] - cur_obj.time
event['timing'] = time_diff
event['xi'] = xi
event['yi'] = yi
# done parsing! now to format the json
# get streaming averages
stream_avg = [sum(l) / len(l) for l in stream_timings]
# get unstable rate
unstable_rate = np.std(all_timings) * 10
result = deepcopy(replay)
result.pop('replay_data')
result['timeline'] = timeline
result['keys'] = dict(keys)
result['hitmap'] = [int(i) for i in np.ravel(hitmap).tolist()]
result['hitmap_resolution'] = HITMAP_RESOLUTION
result['hitmap_size'] = HITMAP_SIZE
result['circle_size'] = RADIUS
result['timings'] = [int(i) for i in timings.tolist()]
result['stream_timings'] = stream_avg
result['unstable_rate'] = unstable_rate
result['beatmap'] = difficulty
return result
def plot_hitmap(hitmap):
import matplotlib.pyplot as plt
res = len(hitmap)
mods = replay['mods']
csr = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
fig, axis = plt.subplots()
heatmap = axis.pcolor(hitmap, cmap=plt.cm.viridis, alpha=1.0)
circle = plt.Circle((HITMAP_RESOLUTION/2, HITMAP_RESOLUTION/2), \
csr/HITMAP_SIZE*HITMAP_RESOLUTION, color='red', fill=False)
fig.gca().add_artist(circle);
axis.set_aspect('equal')
plt.xlim(0, res)
plt.ylim(0, res)
plt.show();
def get_beatmap_id(bm_hash):
# api call to find the beatmap id
apiurl = 'https://osu.ppy.sh/api/get_beatmaps?'
key = open('apikey').read().strip()
url = apiurl + 'k=' + key + '&h=' + bm_hash
try:
response = urllib.request.urlopen(url)
except urllib.error.URLError:
return None
res = str(response.read(), 'utf-8')
jsonRes = json.loads(res)
res = jsonRes[0]
return (res['beatmap_id'], res['beatmapset_id'], res['difficultyrating'])
if __name__ == '__main__':
# bm_file = 'data/granat.osu'
# rp_file = 'data/granat_extra.osr'
# bm_file = 'data/junshin_always.osu'
# rp_file = 'data/junshin_always_colorful.osr'
# bm_file = 'data/darling_insane.osu'
# rp_file = 'data/darling_insane.osr'
rp_file = sys.argv[1]
replay = parseReplay(open(rp_file, 'rb').read())
if replay['mode'] != 0:
print(json.dumps({'error': 'Unsupported game mode.'}))
sys.exit(0)
# attempt to locate beatmap file in /data
bm_hash = replay['beatmap_md5']
bm_path = 'data/' + bm_hash + '.osu'
bm_file = None
bm_tuple = get_beatmap_id(bm_hash)
if bm_tuple == None:
print(json.dumps({'error': 'Could not access the osu! api at this time. Please try again in a bit.'}))
sys.exit(0);
bm_id, bm_set_id, sd = bm_tuple
if os.path.isfile(bm_path):
bm_file = open(bm_path)
else:
# download the beatmap file to the local file system
if bm_id != None:
urllib.request.urlretrieve('https://osu.ppy.sh/osu/' + bm_id, bm_path)
bm_file = open(bm_path)
if bm_file == None:
print(json.dumps({'error': 'Invalid beatmap hash: beatmap not found'}))
sys.exit(0)
replay_file = open(rp_file, 'rb').read()
objects, beatmap = parse_osu(bm_file)
beatmap['beatmap_id'] = bm_id
beatmap['beatmap_set_id'] = bm_set_id
beatmap['beatmap_md5'] = bm_hash
beatmap['sd'] = sd
results = simulate(objects, beatmap, replay)
print(json.dumps(results))
# plot_hitmap(np.reshape(results['hitmap'], (HITMAP_RESOLUTION, HITMAP_RESOLUTION)))
# parser.py
from struct import unpack_from
from dbparse import parseReplay
from collections import Counter, deque
import numpy as np
import math
import json
import urllib.request
import sys
import os
from copy import deepcopy
# constants
HITMAP_RESOLUTION = 64
HITMAP_SIZE = 128
TIMING_RESOLUTION = 64
class ModeError(Exception):
def __init__(self, value):
self.mode = value
def __str__(self):
return repr(self.mode)
class HitObject:
x = -1
y = -1
time = -1
lenient = False
def __init__(self, x, y, time, lenient):
self.x = x
self.y = y
self.time = time
self.lenient = lenient
self.tags = []
def add_tag(self, tag):
if tag not in self.tags:
self.tags.append(tag)
def __str__(self):
return '(%d, %d, %d, %s)' % \
(self.time, self.x, self.y, self.tags)
class TimingPoint:
time = -1
mpb = -1
def __init__(self, time, mpb):
self.time = time
self.mpb = mpb
def parse_object(line):
params = line.split(',')
x = float(params[0])
y = float(params[1])
time = int(params[2])
objtype = int(params[3])
# hit circle
if (objtype & 1) != 0:
return HitObject(x, y, time, False)
# sliders
elif (objtype & 2) != 0:
return HitObject(x, y, time, True)
# ignore spinners
else:
return None
"""
Takes a beatmap file as input, and outputs a list of
beatmap objects, sorted by their time offset.
"""
def parse_osu(osu):
objects = []
timing_points = []
beatmap = {}
in_objects = False
in_timings = False
# parse the osu! file
for line in osu:
if 'CircleSize' in line:
beatmap['cs'] = float(line.split(':')[1])
elif 'OverallDifficulty' in line:
beatmap['od'] = float(line.split(':')[1])
elif 'HPDrainRate' in line:
beatmap['hp'] = float(line.split(':')[1])
elif 'ApproachRate' in line:
beatmap['ar'] = float(line.split(':')[1])
elif 'Mode' in line:
mode = int(line.split(':')[1])
if mode != 0:
raise ModeError(mode)
elif 'Title' in line and 'Unicode' not in line:
beatmap['title'] = line.split(':')[1].strip()
beatmap['title_lower'] = beatmap['title'].lower()
elif 'Version' in line:
beatmap['version'] = line.split(':')[1].strip()
beatmap['version_lower'] = beatmap['version'].lower()
elif 'Artist' in line and 'Unicode' not in line:
beatmap['artist'] = line.split(':')[1].strip()
beatmap['artist_lower'] = beatmap['artist'].lower()
elif 'Creator' in line:
beatmap['creator'] = line.split(':')[1].strip()
beatmap['creator_lower'] = beatmap['creator'].lower()
elif 'BeatmapID' in line:
beatmap['beatmap_id'] = line.split(':')[1].strip()
elif 'BeatmapSetID' in line:
beatmap['beatmap_set_id'] = line.split(':')[1].strip()
elif '[TimingPoints]' in line:
in_timings = True
elif in_timings:
if line.strip() == '':
in_timings = False
continue
args = line.split(',')
time = float(args[0])
mpb = float(args[1])
if mpb > 0:
pt = TimingPoint(time, mpb)
timing_points.append(pt)
if '[HitObjects]' in line:
in_objects = True
elif in_objects:
obj = parse_object(line)
if obj != None:
objects.append(obj)
# find streams
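# Two consecutive notes are tagged 'stream' when the gap between them is under
# mpb/4 + 10 ms, i.e. roughly quarter-beat spacing at the tempo of the timing point
# found for the earlier note.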
for i in range(len(objects) - 1):
obj0 = objects[i]
obj1 = objects[i+1]
# get current mpb
mpb = -1
for t in timing_points:
mpb = t.mpb
if obj0.time >= t.time:
break
timing_diff = obj1.time - obj0.time
# print(str(timing_diff) + ' ' + str(mpb/4 + 10))
if timing_diff < mpb/4.0 + 10.0:
obj0.add_tag('stream')
obj1.add_tag('stream')
return (objects, beatmap)
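# Usage sketch (hypothetical file path; parse_osu expects an iterable of lines, e.g. an open text file):
#   with open('data/some_map.osu', encoding='utf-8') as f:
#       objects, beatmap = parse_osu(f)
#   print(beatmap['title'], len(objects))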
# get the timing window for a note with the given OD and mods
def timing_window(od, hd, ez):
mod_od = od
if ez:
mod_od = 0.5 * od
elif hd:
mod_od = min(1.4 * od, 10)
w300 = 79.5 - 6.0 * mod_od
w100 = 139.5 - 8.0 * mod_od
w50 = 199.5 - 10.0 * mod_od
return (w300, w100, w50)
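# Worked example of the windows above (no mods): OD 9 gives
# (79.5 - 54, 139.5 - 72, 199.5 - 90) = (25.5, 67.5, 109.5) ms for 300/100/50.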
def in_window(obj, time, window):
return obj.time - window[2] <= time and \
obj.time + window[2] >= time
def pushed_buttons(prev_input, cur_input):
buttons = []
for k in ['K1', 'K2', 'M1', 'M2']:
if cur_input['keys'][k] and not prev_input['keys'][k]:
buttons.append(k)
return buttons
def circle_radius(cs, hd, ez):
mod_cs = cs
if hd:
mod_cs *= 1.3
elif ez:
mod_cs /= 2
return (104.0 - mod_cs * 8.0) / 2.0
def dist(p_input, obj):
return math.sqrt(math.pow(p_input['x'] - obj.x, 2) + \
math.pow(p_input['y'] - obj.y, 2))
def score_hit(time, obj, window):
if obj.lenient and abs(time - obj.time) <= window[2]:
return '300'
if abs(time - obj.time) <= window[0]:
return '300'
elif abs(time - obj.time) <= window[1]:
return '100'
elif abs(time - obj.time) <= window[2]:
return '50'
return 'welp'
def transform_coords(cur_input, prev_obj, cur_obj):
dx = cur_input['x'] - cur_obj.x
dy = cur_input['y'] - cur_obj.y
theta = math.pi / 2.0
if prev_obj != None:
thetaprime = math.atan2(cur_obj.y - prev_obj.y, cur_obj.x - prev_obj.x)
theta = math.pi / 2.0 - thetaprime
# get the rotation matrix
a = math.cos(theta)
b = math.sin(theta)
R = np.matrix([[a, -b], [b, a]])
# apply the rotation matrix to the coordinates
coords = np.ravel(R * np.matrix([[dx], [dy]]))
# remap to hitmap pixel coordinates
coords += HITMAP_SIZE / 2
# one last remapping to hitmap index
xi = int(coords[0] / HITMAP_SIZE * HITMAP_RESOLUTION)
yi = int(coords[1] / HITMAP_SIZE * HITMAP_RESOLUTION)
return(xi, yi)
"""
Simulates the game, collecting statistics on the way.
"""
def simulate(objects, difficulty, replay):
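# High-level flow: step through the timeline 1 ms at a time, consuming replay inputs as
# their timestamps pass. Each new button press is tested against the active hit object
# (cursor within RADIUS); hits fill the hitmap and timing buckets, presses that match
# nothing are kept as extra inputs, and objects that outlive their 50-window become
# misses, which are afterwards paired with any extra input falling inside their window.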
mods = replay['mods']
WINDOW = timing_window(difficulty['od'], mods['hard_rock'], mods['easy'])
RADIUS = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
replay_data = replay['replay_data']
end_time = max([objects[-1].time, replay_data[-1]['time']])
difficulty['length'] = objects[-1].time
# for o in replay_data:
# if o['time'] > 49500 and o['time'] < 49700:
# print(o)
# iteration variables
inputs = deque(replay_data)
objects = deque(objects)
cur_input = {'time': -1, 'keys': \
{'M1': False, 'M2': False, 'K1': False, 'K2': False}}
prev_obj = None
cur_obj = None
marked = False
# stats variables
timeline = []
keys = {'M1': 0, 'M2': 0, 'K1': 0, 'K2': 0}
hitmap = np.zeros((HITMAP_RESOLUTION, HITMAP_RESOLUTION))
timings = np.zeros(TIMING_RESOLUTION)
stream_num = 0
stream_timings = []
all_timings = []
extra_inputs = []
missed_notes = []
# first, reverse y axis if hr
if mods['hard_rock']:
for o in objects:
o.y = 384 - o.y
for time in range(end_time):
# check if input advances
if len(inputs) > 0:
next_input = inputs[0]
if time > next_input['time']:
prev_input = cur_input
cur_input = inputs.popleft()
# check if player pushed a button
buttons = pushed_buttons(prev_input, cur_input)
if len(buttons) > 0:
# add the pressed key to stats
for k in buttons:
keys[k] += 1
# check if player hit current hitobject
if cur_obj != None and dist(cur_input, cur_obj) < RADIUS:
# it's a hit!
score_val = score_hit(time, cur_obj, WINDOW)
time_diff = time - cur_obj.time
# if cur_obj.time > 10000 and cur_obj.time < 11000:
# print('%d - %d' % (cur_input['time'], cur_obj.time))
# get the x and y hitmap coords
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
hitmap[yi][xi] += 1
# get the timing bucket
bucket = int(time_diff / (WINDOW[2] * 2) * \
TIMING_RESOLUTION) + int(TIMING_RESOLUTION / 2)
if bucket >= 0 and bucket < len(timings):
timings[bucket] += 1
all_timings.append(time_diff)
# if it's a stream, record the timing
if 'stream' in cur_obj.tags:
if stream_num >= len(stream_timings):
stream_timings.append([])
stream_timings[stream_num].append(time_diff)
stream_num += 1
else:
stream_num = 0
# if the scoreval is 100 or 50, add it to the timeline
if score_val == '100' or score_val == '50':
timeline.append({ \
't': time, \
'event': score_val, \
'timing': time_diff, \
'xi': xi, \
'yi': yi
})
prev_obj = cur_obj
cur_obj = None
else:
# wasted a click
extra_inputs.append(cur_input)
# hit object expires
if cur_obj != None and time > cur_obj.time + WINDOW[2]:
event = { \
't': cur_obj.time, \
'event': 'miss', \
'timing': 0, \
'xi': -1, \
'yi': -1 \
}
timeline.append(event)
missed_notes.append({
'prev': prev_obj, \
'cur': cur_obj, \
'event': event \
})
prev_obj = cur_obj
cur_obj = None
# pop in the next object if there's a vacancy
if len(objects) > 0:
next_obj = objects[0]
if cur_obj == None and in_window(next_obj, time, WINDOW):
cur_obj = objects.popleft()
# try to match up missed notes to nearest hit attempts
for note in missed_notes:
cur_obj = note['cur']
prev_obj = note['prev']
event = note['event']
for cur_input in extra_inputs:
if in_window(cur_obj, cur_input['time'], WINDOW):
# print('Paired (%f, %f) -> (%d, %f, %f) with (%d, %f, %f)' % (prev_obj.x, prev_obj.y, cur_obj.time, cur_obj.x, cur_obj.y, cur_input['time'], cur_input['x'], cur_input['y']))
# print('%f > %f' % (dist(cur_input, cur_obj), RADIUS))
xi, yi = transform_coords(cur_input, prev_obj, cur_obj)
# print('(%d, %d)' % (xi, yi))
time_diff = cur_input['time'] - cur_obj.time
event['timing'] = time_diff
event['xi'] = xi
event['yi'] = yi
# done parsing! now to format the json
# get streaming averages
stream_avg = [sum(l) / len(l) for l in stream_timings]
# get unstable rate
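# Unstable rate follows the usual osu! convention: 10x the standard deviation of all hit-timing errors (in ms).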
unstable_rate = np.std(all_timings) * 10
result = deepcopy(replay)
result.pop('replay_data')
result['timeline'] = timeline
result['keys'] = dict(keys)
result['hitmap'] = [int(i) for i in np.ravel(hitmap).tolist()]
result['hitmap_resolution'] = HITMAP_RESOLUTION
result['hitmap_size'] = HITMAP_SIZE
result['circle_size'] = RADIUS
result['timings'] = [int(i) for i in timings.tolist()]
result['stream_timings'] = stream_avg
result['unstable_rate'] = unstable_rate
result['beatmap'] = difficulty
return result
def plot_hitmap(hitmap):
import matplotlib.pyplot as plt
res = len(hitmap)
mods = replay['mods']
csr = circle_radius(difficulty['cs'], mods['hard_rock'], mods['easy'])
fig, axis = plt.subplots()
heatmap = axis.pcolor(hitmap, cmap=plt.cm.viridis, alpha=1.0)
circle = plt.Circle((HITMAP_RESOLUTION/2, HITMAP_RESOLUTION/2), \
csr/HITMAP_SIZE*HITMAP_RESOLUTION, color='red', fill=False)
fig.gca().add_artist(circle);
axis.set_aspect('equal')
plt.xlim(0, res)
plt.ylim(0, res)
plt.show();
def get_beatmap_id(bm_hash):
# api call to find the beatmap id
apiurl = 'https://osu.ppy.sh/api/get_beatmaps?'
key = open('apikey').read().strip()
url = apiurl + 'k=' + key + '&h=' + bm_hash
try:
response = urllib.request.urlopen(url)
except urllib.error.URLError:
return None
res = str(response.read(), 'utf-8')
jsonRes = json.loads(res)
res = jsonRes[0]
return (res['beatmap_id'], res['beatmapset_id'], res['difficultyrating'])
if __name__ == '__main__':
# bm_file = 'data/granat.osu'
# rp_file = 'data/granat_extra.osr'
# bm_file = 'data/junshin_always.osu'
# rp_file = 'data/junshin_always_colorful.osr'
# bm_file = 'data/darling_insane.osu'
# rp_file = 'data/darling_insane.osr'
rp_file = sys.argv[1]
replay = parseReplay(open(rp_file, 'rb').read())
if replay['mode'] != 0:
print(json.dumps({'error': 'Unsupported game mode.'}))
sys.exit(0)
# attempt to locate beatmap file in /data
bm_hash = replay['beatmap_md5']
bm_path = 'data/' + bm_hash + '.osu'
bm_file = None
bm_tuple = get_beatmap_id(bm_hash)
if bm_tuple == None:
print(json.dumps({'error': 'Could not access the osu! api at this time. Please try again in a bit.'}))
sys.exit(0);
bm_id, bm_set_id, sd = bm_tuple
if os.path.isfile(bm_path):
bm_file = open(bm_path)
else:
# download the beatmap file to the local file system
if bm_id != None:
urllib.request.urlretrieve('https://osu.ppy.sh/osu/' + bm_id, bm_path)
bm_file = open(bm_path)
if bm_file == None:
print(json.dumps({'error': 'Invalid beatmap hash: beatmap not found'}))
sys.exit(0)
replay_file = open(rp_file, 'rb').read()
objects, beatmap = parse_osu(bm_file)
beatmap['beatmap_id'] = bm_id
beatmap['beatmap_set_id'] = bm_set_id
beatmap['beatmap_md5'] = bm_hash
beatmap['sd'] = sd
results = simulate(objects, beatmap, replay)
print(json.dumps(results))
# plot_hitmap(np.reshape(results['hitmap'], (HITMAP_RESOLUTION, HITMAP_RESOLUTION)))
// changeLanguage.js
const english = document.querySelector('.eng')
const polish = document.querySelector('.pl')
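// Clicking the language buttons rewrites every visible UI string in place;
// the two handlers below mirror each other, one per language.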
polish.addEventListener('click', function () {
document.querySelector('.hey').textContent = "Witaj! Nazywam się Piotr Ludew"
document.querySelector('.front').textContent = "i chcę pracować jako Front-End Developer"
document.querySelector('.textComing1').textContent = "Co"
document.querySelector('.textComing2').textContent = "potrafię?"
document.querySelector('.advanced1').textContent = "Poziom zaawansowania"
document.querySelector('.advanced2').textContent = "Poziom zaawansowania"
document.querySelector('.advanced3').textContent = "Poziom zaawansowania"
document.querySelector('.advanced4').textContent = "Poziom zaawansowania"
document.querySelector('.advanced5').textContent = "Poziom zaawansowania"
document.querySelector('.advanced6').textContent = "Poziom zaawansowania"
document.querySelector('.advanced7').textContent = "Poziom zaawansowania"
document.querySelector('.advanced8').textContent = "Poziom zaawansowania"
document.querySelector('.advanced1').style.margin = "0px"
document.querySelector('.advanced2').style.margin = "0px"
document.querySelector('.advanced3').style.margin = "0px"
document.querySelector('.advanced4').style.margin = "0px"
document.querySelector('.advanced5').style.margin = "0px"
document.querySelector('.advanced6').style.margin = "0px"
document.querySelector('.advanced7').style.margin = "0px"
document.querySelector('.advanced8').style.margin = "0px"
document.querySelector('.pro1').textContent = "Zaawansowany"
document.querySelector('.pro6').textContent = "Zaawansowany"
document.querySelector('.headerAbout').textContent = "TROSZKĘ O MOJEJ PRACY..."
//EDIT
document.querySelector('.pAbout').textContent = "Jako programista przemysłowy, zajmuję się tworzeniem oprogramowania dla robotów, linii przemysłowych oraz systemów wizyjnych. Moim codziennym zadaniem jest odnajdywanie się w różnych środowiskach programistycznych i językach programowania, co wymaga wysokich zdolności analitycznego i programistycznego sposobu myślenia. Jak wiadomo elementem pracy programisty jest ciągła styczność z dokumentacją, także w innych językach np. włoskim. W pracy często wykorzystuję Pythona do automatyzacji zadań, czy tworzenia komunikacji po protokole TCP/IP. Rozpoczęcie pracy zawodowej zaraz po ukończeniu studiów oraz samodzielne doszkalanie się dały mi możliwość intensywnego rozwoju. W zaledwie półtora roku otrzymałem tytuł młodszego specjalisty i powierzono mi prowadzenie projektów w dużym koncernie spożywczym. Moja obecna praca sprowadza się do dużej ilości wyjazdów, dlatego przy podjęciu nowej zależy mi, aby była ona stacjonarna."
document.querySelector('.h3YouTube').textContent = "CO ROBIĘ W PRAKTYCE?"
document.querySelector('.headerAbout2').textContent = "PASJA PROGRAMOWANIA..."
//EDIT
document.querySelector('.pAbout2').textContent = "Idea mojej pracy wbrew pozorom jest bardzo powiązana ze światem IT, dlatego też zadecydowałem o przebranżowieniu na Front-End Developera. Programowanie jest moją pasją, w której najbardziej lubię, gdy moja głowa każdego dnia „pali się” nad rozwiązaniem danego problemu. Od roku intensywnie pracuję nad nauką JavaScript i React, która podyktowana była potrzebą pisania oprogramowania na roboty współpracujące HCR oraz systemy wizyjne firmy COGNEX. Mam za sobą dużą ilość kursów HTML, CSS, co sprawia, że czuję się mocny w tych technologiach i nie straszna mi kaskadowość. Ponadto, stosuję responsywne tworzenie stron, oraz ideę mobile-first. Dzięki odpowiedniemu przygotowaniu uważam, że nadeszła pora, aby zająć się programowaniem komercyjnie. Jeśli dacie mi Państwo szansę, to obiecuję, że bardzo szybko stanę się samodzielnym, wykwalifikowanym pracownikiem. Jestem osobą, która szybko przyswaja wiedzę i umie wykorzystać ją w praktyce."
document.querySelector('.headerAbout3').textContent = "... I O MNIE"
//EDIT
document.querySelector('.h4About3').textContent = "Z natury jestem otwartym i wesołym człowiekiem. Moją największą zaletą jest pracowitość i dążenie do postawionego sobie celu. Wiąże się to jednak z moją największą wadą, jaką jest upartość. W wolnych chwilach, oprócz programowania, lubię grać na konsoli, oglądać mecze, filmy i seriale. Dbam również o aktywność fizyczną, gdy sprzyja pogoda śmigam na rolkach, czy gram w piłkę nożną z przyjaciółmi."
document.querySelector('.textPhone').textContent = "Koniecznie sprawdź moją stronę na swoim smartfonie!"
document.querySelector('.hContact').textContent = "kontakt"
document.querySelector('.sendMessage').textContent = "wyślij wiadomość"
$('input:text:nth-of-type(1)').attr('placeholder', 'Twoję imię...');
$('textarea').attr('placeholder', 'Twoja wiadomość...');
document.querySelector('.origin').textContent = "Początek"
document.querySelector('.abilities').textContent = "Umiejętności"
document.querySelector('.work').textContent = "Praca"
document.querySelector('.programmer').textContent = "Programowanie"
document.querySelector('.aboutMyLife').textContent = "O mnie"
document.querySelector('.form').textContent = "Kontakt"
})
english.addEventListener('click', function () {
document.querySelector('.hey').textContent = "Hi! My name is Piotr Ludew"
document.querySelector('.front').textContent = "I want to work as Front-End Developer"
document.querySelector('.textComing1').textContent = "My"
document.querySelector('.textComing2').textContent = "abilities"
document.querySelector('.advanced1').textContent = "Tech stack"
document.querySelector('.advanced1').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced2').textContent = "Tech stack"
document.querySelector('.advanced2').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced3').textContent = "Tech stack"
document.querySelector('.advanced3').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced4').textContent = "Tech stack"
document.querySelector('.advanced4').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced5').textContent = "Tech stack"
document.querySelector('.advanced5').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced6').textContent = "Tech stack"
document.querySelector('.advanced6').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced7').style.margin = "0px 70px 0px 70px"
document.querySelector('.advanced7').textContent = "Tech stack"
document.querySelector('.advanced8').textContent = "Tech stack"
document.querySelector('.advanced8').style.margin = "0px 70px 0px 70px"
document.querySelector('.pro1').textContent = "Advanced"
document.querySelector('.pro6').textContent = "Advanced"
document.querySelector('.headerAbout').textContent = "About my job..."
document.querySelector('.pAbout').textContent = "As an industrial programmer, I create software for robots, technological lines and vision systems in five-person team. My typical day is to find myself in various programming environments and computer languages, which requires the use of analytical skills and programming mindset. As you know, the job of a programmer is in constant work with the documentation, also in other languages, e.g. Italian in my case. At work, I often use Python to automate tasks or communicate over the TCP / IP protocol. I started my professional career right after graduating from university and self-training gave me the opportunity for intensive development. In just a year and a half, I received the title of junior specialist and was entrusted with running projects with a huge food concern. My current job involves a large number of trips and this is the reason why I want to change job."
document.querySelector('.h3YouTube').textContent = "What's about practice"
document.querySelector('.headerAbout2').textContent = "passion for coding..."
//EDIT
document.querySelector('.pAbout2').textContent = "The idea of my work is very related to the IT world, which is why I decided displacement to the Front-End Developer. Programming is my passion, which I like the most when my head is „on fire” every day over solving a coding problems. I have been working intensively on the learning of JavaScript and React, which was dictated by the need to implement the software for cooperating with human robots HCR and vision systems COGNEX. I have a lot of HTML, CSS courses behind me, which makes me feel strong in these technologies and I understand cascading completely . In addition, I use responsive website development and the mobile-first idea. Thanks with proper preparation, I believe it's time to start programming commercially. If you give me a chance, I promise that I will become independent and qualified very quickly worker. I am a person who quickly assimilates knowledge and knows how to use it in practice."
document.querySelector('.headerAbout3').textContent = "... and about me"
//EDIT
document.querySelector('.h4About3').textContent = "By nature I am an open and cheerful person. My greatest advantage is diligence and striving to achieve my goal. However, this is related to my biggest flaw, which is stubbornness. In my free time, apart from programming, I like to play on the console, watch matches, movies and TV series. I also take care of physical activity, when the weather is fine, I skate or play football with my friends."
document.querySelector('.textPhone').textContent = "You may to check out my website on your smartphone!"
document.querySelector('.hContact').textContent = "contact"
document.querySelector('.sendMessage').textContent = "send message"
$('input:text:nth-of-type(1)').attr('placeholder', 'Your name...');
$('textarea').attr('placeholder', 'Your message...');
document.querySelector('.origin').textContent = "Top"
document.querySelector('.abilities').textContent = "Abilities"
document.querySelector('.work').textContent = "Job"
document.querySelector('.programmer').textContent = "Coding"
document.querySelector('.aboutMyLife').textContent = "About Me"
document.querySelector('.form').textContent = "Contact"
})
// base.rs
use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
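// Builds the alpha network: one HashEq entry node per distinct hash-eq value, a flat
// vector of alpha test nodes, and a map from each statement to the memory (HashEq or
// alpha) that will store its matching facts.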
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statment_id in hash_eq_info.dependents {
statement_memories.insert(statment_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by layed-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartesian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statment_id in info.dependents {
statement_memories.insert(statment_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
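// Note: the loop below currently only pairs up alpha memories and records the
// shared-rule bookkeeping; constructing the beta node itself is still the TODO further down.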
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(pos, ref intersect)| !intersect.is_empty())
.max_by_key(|&(pos, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding an beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
// base.rs
use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statment_id in hash_eq_info.dependents {
statement_memories.insert(statment_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by layed-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartisian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statment_id in info.dependents {
statement_memories.insert(statment_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(pos, ref intersect)| !intersect.is_empty())
.max_by_key(|&(pos, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding an beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
// base.rs
use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statment_id in hash_eq_info.dependents {
statement_memories.insert(statment_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by layed-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartisian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statment_id in info.dependents {
statement_memories.insert(statment_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
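// Lays out the beta (join) network: maps each HashEq/alpha memory to the rules that depend on it,
// then repeatedly joins the two memories sharing the most rules with an AND beta node.
// Construction of the beta nodes themselves is still unfinished (see the TODO below).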
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(_, ref intersect)| !intersect.is_empty())
.max_by_key(|&(_, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
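// Appends `destination` to the destination list of the HashEq or alpha node addressed by `memory`.
// Beta memories are not valid targets here.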
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding an beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn | <I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
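// Interns `val`, returning the already-stored Rc when an equal fact is present.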
pub fn insert(&mut self, val: T) -> Rc<T> {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
}
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| insert | identifier_name |
base.rs | use std::marker::PhantomData;
use std::rc::Rc;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use ::serial::SerialGen;
use ::traits::ReteIntrospection;
use ::builder::{AlphaTest, ConditionInfo, KnowledgeBuilder};
use ::network::ids::*;
use ::builders::ids::{StatementId, RuleId};
use runtime::memory::{AlphaMemoryId, MemoryId};
pub struct LayoutIdGenerator {
hash_eq_ids: HashEqIdGen,
alpha_ids: AlphaIdGen,
beta_ids: BetaIdGen
}
impl LayoutIdGenerator {
pub fn new() -> LayoutIdGenerator {
LayoutIdGenerator{
hash_eq_ids: Default::default(),
alpha_ids: Default::default(),
beta_ids: Default::default()
}
}
pub fn next_hash_eq_id(&mut self) -> HashEqId {
self.hash_eq_ids.next()
}
pub fn next_alpha_id(&mut self) -> AlphaId {
self.alpha_ids.next()
}
pub fn next_beta_id(&mut self) -> BetaId {
self.beta_ids.next()
}
}
impl Default for LayoutIdGenerator {
fn default() -> Self {
LayoutIdGenerator::new()
}
}
pub struct KnowledgeBase<T: ReteIntrospection> {
t: PhantomData<T>
}
impl<T: ReteIntrospection> KnowledgeBase<T> {
pub fn compile(builder: KnowledgeBuilder<T>) -> KnowledgeBase<T> {
let (string_repo, rules, condition_map) = builder.explode();
let (hash_eq_nodes, alpha_network, statement_memories) = Self::compile_alpha_network(condition_map);
let mut statement_rule_map = HashMap::new();
for (rule_id, rule) in rules {
for statement_id in &rule.statement_ids {
statement_rule_map.insert(*statement_id, rule_id);
}
}
KnowledgeBase{t: PhantomData}
}
fn compile_alpha_network(condition_map: HashMap<T::HashEq, HashMap<AlphaTest<T>, ConditionInfo>>)
-> (HashMap<HashEqId, (T::HashEq, HashEqNode)>, Vec<AlphaNode<T>>, HashMap<StatementId, MemoryId>) {
let mut conditions: Vec<_> = condition_map.into_iter().collect();
// Order conditions ascending by dependent statement count, then test count.
conditions.sort_by(|&(_, ref tests1), &(_, ref tests2)| {
if let (Some(ref hash1), Some(ref hash2)) = (tests1.get(&AlphaTest::HashEq), tests2.get(&AlphaTest::HashEq)) {
hash1.dependents.len().cmp(&hash2.dependents.len()).then(tests1.len().cmp(&tests2.len()))
} else {
unreachable!("Unexpected comparison. HashEq must be set");
}
});
let mut node_id_gen = LayoutIdGenerator::new();
let mut hash_eq_nodes = HashMap::new();
let mut statement_memories: HashMap<StatementId, MemoryId> = HashMap::new();
let mut alpha_network = Vec::new();
// Pop off the most shared & complex tests first and lay them out at the front of the network.
// That way they're more likely to be right next to each other
while let Some((hash_val, mut test_map)) = conditions.pop() {
let mut layout_map = HashMap::new();
// Take the HashEq node (our entry point) and exhaustively assign destination nodes until no more statements are shared.
let mut hash_eq_info = test_map.remove(&AlphaTest::HashEq).unwrap();
let hash_eq_id = node_id_gen.next_hash_eq_id();
let mut hash_eq_destinations: Vec<DestinationNode> = Vec::new();
// Lay down the node for the most shared nodes before the others
while let Some((max_info, max_intersection)) = test_map.iter()
.map(|(_, info)| info)
.map(|info| (info, &hash_eq_info.dependents & &info.dependents))
.filter(|&(_, ref intersection)| !intersection.is_empty())
.max_by_key(|&(_, ref intersection)| intersection.len()) {
let destination_id = layout_map.entry(max_info.id)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
hash_eq_info.dependents.retain(|x| !max_intersection.contains(&x));
hash_eq_destinations.push(destination_id.into());
}
// Add the HashEq node to the map && store any remaining statements for the beta network
hash_eq_nodes.insert(hash_eq_id, (hash_val, HashEqNode{id: hash_eq_id, store: !hash_eq_info.dependents.is_empty(), destinations: hash_eq_destinations}));
for statement_id in hash_eq_info.dependents {
statement_memories.insert(statement_id, hash_eq_id.into());
}
let mut tests: Vec<_> = test_map.into_iter().collect();
loop {
// Sort the remaining tests by laid-out vs not.
// TODO: sort by dependents.size, too. put that at the front
tests.sort_by_key(|&(_, ref info)| !layout_map.contains_key(&info.id));
println!("Layout: {:?}", layout_map);
println!("Sorted: {:?}", tests);
// Again, in order of most shared to least, lay down nodes
// TODO: when closure is cloneable, fix this to use cartesian product
let output = tests.iter().enumerate().tuple_combinations()
.filter(|&((_, &(_, ref info1)), (_, &(_, ref info2)))| !info1.dependents.is_empty() && layout_map.contains_key(&info1.id) && !layout_map.contains_key(&info2.id))
.map(|((pos1, &(_, ref info1)), (_, &(_, ref info2)))| (pos1, info1.id, info2.id, &info1.dependents & &info2.dependents))
.filter(|&(_, _, _, ref shared)| !shared.is_empty())
.max_by_key(|&(_, _, _, ref shared)| shared.len());
if let Some((pos1, id1, id2, shared)) = output {
let alpha2_id = layout_map.entry(id2)
.or_insert_with(|| NodeLayout{node_id: node_id_gen.next_alpha_id(), destinations: Default::default()})
.node_id;
layout_map.get_mut(&id1).unwrap().destinations.push(alpha2_id.into());
tests.get_mut(pos1).unwrap().1.dependents.retain(|x| !shared.contains(&x));
} else {
break;
}
}
println!("Final layout: {:?}", &layout_map);
// TODO: Assert layout numbers are correct
// Do the actual layout into the alpha network
tests.sort_by_key(|&(_, ref info)| layout_map.get(&info.id).unwrap().node_id);
for (test, info) in tests.into_iter() {
let alpha_layout = layout_map.remove(&info.id).unwrap();
let id = alpha_layout.node_id;
let dest = alpha_layout.destinations;
let store = !info.dependents.is_empty();
assert_eq!(alpha_network.len(), alpha_layout.node_id.index());
alpha_network.push(AlphaNode{id, test, store, dest});
for statement_id in info.dependents {
statement_memories.insert(statement_id, id.into());
}
}
}
println!("Conditions: {:?}", &conditions);
println!("HashEqNode: {:?}", &hash_eq_nodes);
println!("Memory map: {:?}", &statement_memories);
println!("Alpha Network: size {:?}", alpha_network.len());
(hash_eq_nodes, alpha_network, statement_memories)
}
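// Lays out the beta (join) network: maps each HashEq/alpha memory to the rules that depend on it,
// then repeatedly joins the two memories sharing the most rules with an AND beta node.
// Construction of the beta nodes themselves is still unfinished (see the TODO below).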
fn compile_beta_network(statement_memories: &HashMap<StatementId, MemoryId>,
statement_rule_map: &HashMap<StatementId, RuleId>,
mut hash_eq_nodes: HashMap<HashEqId, (T::HashEq, HashEqNode)>,
mut alpha_network: Vec<AlphaNode<T>>) {
let mut beta_ids: SerialGen<usize, BetaId> = Default::default();
let mut memory_rule_map: HashMap<MemoryId, HashSet<RuleId>> = HashMap::new();
for (statement_id, memory_id) in statement_memories {
let rule_id = *statement_rule_map.get(statement_id).unwrap();
memory_rule_map
.entry(*memory_id)
.or_insert_with(|| Default::default()).insert(rule_id);
}
/*
let mut beta_network= Vec::new();
let mut beta_stack = Vec::new();
*/
// 1. Select (and remove from the map) the memory (m1) with the most rules
// 2. Select the next memory (m2) with the most shared rules
// 3a. Create a new AND beta node (b1) (in NodeLayout<BetaId>)
// 3b. Remove shared rules from m1 & m2. If either have no more rules, remove from map.
// 3c. Add b1's destination id to m1 and m2's destinations
// 3d. Add b1 to beta stack.
// 4. If an m2 can be found, go to 3a. Otherwise add rule to destination. pop b1 off beta stack
// 5. If stack empty, select next m2 for m1. if no m2, add rule ids as destination nodes. if no more m1 rules, remove from map
let mut alpha_mem_dependents: Vec<(MemoryId, HashSet<RuleId>)> = memory_rule_map.into_iter().collect();
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
while let Some((most_dep_id, mut most_dep)) = alpha_mem_dependents.pop() {
// early exit in case we've reached the front with no dependencies
if most_dep.is_empty() {
break;
}
while let Some((intersect_pos, intersect)) = alpha_mem_dependents.iter().enumerate().rev()
.filter(|&(_, &(_, ref rule_set))| !rule_set.is_empty())
.map(|(pos, &(_, ref rule_set))| (pos, &most_dep & rule_set))
.filter(|&(_, ref intersect)| !intersect.is_empty())
.max_by_key(|&(_, ref intersect)| intersect.len()) {
// Join alpha nodes with beta
let beta_id = beta_ids.next();
most_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, most_dep_id, beta_id.into());
{
let &mut (intersect_id, ref mut intersect_dep) = alpha_mem_dependents.get_mut(intersect_pos).unwrap();
intersect_dep.retain(|x| !intersect.contains(x));
Self::add_alpha_destination(&mut hash_eq_nodes, &mut alpha_network, intersect_id, beta_id.into());
}
// TODO: Left off at creating new beta node
}
alpha_mem_dependents.sort_by_key(|&(_, ref rule_set)| rule_set.len());
}
}
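// Appends `destination` to the destination list of the HashEq or alpha node addressed by `memory`.
// Beta memories are not valid targets here.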
fn add_alpha_destination(hash_eq_nodes: &mut HashMap<HashEqId, (T::HashEq, HashEqNode)>,
alpha_network: &mut Vec<AlphaNode<T>>,
memory: MemoryId,
destination: DestinationNode) {
use ::base::MemoryId::*;
match memory {
HashEq(ref id) => {hash_eq_nodes.get_mut(id).unwrap().1.destinations.push(destination)},
Alpha(alpha_id) => {alpha_network.get_mut(alpha_id.index()).unwrap().dest.push(destination)},
_ => unreachable!("We shouldn't be adding an beta memory destination with this function")
}
}
}
#[derive(Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
struct NodeLayout<T> {
node_id: T,
destinations: Vec<DestinationNode>
}
#[derive(Debug, Copy, Clone, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum DestinationNode {
Alpha(AlphaId),
Beta(BetaId),
Rule(RuleId)
}
impl Into<DestinationNode> for AlphaId {
fn into(self) -> DestinationNode {
DestinationNode::Alpha(self)
}
}
impl Into<DestinationNode> for BetaId {
fn into(self) -> DestinationNode {
DestinationNode::Beta(self)
}
}
impl Into<DestinationNode> for RuleId {
fn into(self) -> DestinationNode {
DestinationNode::Rule(self)
}
}
#[derive(Debug)]
pub struct HashEqNode {
id: HashEqId,
store: bool,
destinations: Vec<DestinationNode>
}
pub struct AlphaNode<T: ReteIntrospection> {
id: AlphaId,
test: AlphaTest<T>,
store: bool,
dest: Vec<DestinationNode>
}
pub struct AlphaMemory<T: ReteIntrospection> {
mem: HashMap<MemoryId, HashSet<Rc<T>>>,
}
impl<T: ReteIntrospection> AlphaMemory<T> {
pub fn insert<I: Into<MemoryId> + AlphaMemoryId>(&mut self, id: I, val: Rc<T>) {
let mem_id = id.into();
self.mem.entry(mem_id)
.or_insert_with(Default::default)
.insert(val);
}
}
pub struct AlphaNetwork<T: ReteIntrospection> {
hash_eq_node: HashMap<T::HashEq, HashEqNode>,
alpha_network: Vec<AlphaNode<T>>
}
pub struct FactStore<T: ReteIntrospection> {
store: HashSet<Rc<T>>
}
impl<T: ReteIntrospection> FactStore<T> {
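// Interns `val`, returning the already-stored Rc when an equal fact is present.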
pub fn insert(&mut self, val: T) -> Rc<T> |
}
pub enum BetaNodeType {
And(MemoryId, MemoryId)
}
pub struct BetaNode {
id: BetaId,
b_type: BetaNodeType,
destinations: Vec<DestinationNode>
}
pub struct BetaNetwork {
b_network: Vec<BetaNode>
}
pub struct BetaMemory {
tripwire: Vec<bool>,
}
| {
let rc = Rc::new(val);
if !self.store.insert(rc.clone()) {
self.store.get(&rc).unwrap().clone()
} else {
rc
}
} | identifier_body |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else |
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
} | conditional_block |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
| /// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
} | /// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the | random_line_split |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf |
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| {
self.destruct().unwrap_err()
} | identifier_body |
lib.rs | #![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![cfg_attr(feature="clippy_pedantic", warn(clippy_pedantic))]
// Clippy doesn't like this pattern, but I do. I may consider changing my mind
// on this in the future, just to make clippy happy.
#![cfg_attr(all(feature="clippy", not(feature="clippy_pedantic")),
allow(needless_range_loop))]
#[macro_use]
mod util;
pub mod evolve;
pub mod format;
pub mod global;
//pub use evolve::Hashlife;
mod block;
mod leaf;
mod cache;
use std::cell::{RefCell, RefMut};
use std::fmt;
use num::{BigUint, One, FromPrimitive};
pub use crate::leaf::{Leaf, LG_LEAF_SIZE, LEAF_SIZE, LEAF_MASK};
use crate::block::{
Block as RawBlock,
Node as RawNode,
CABlockCache,
};
use crate::util::make_2x2;
/// Global state for the Hashlife algorithm. For information on the lifetime
/// parameter see `block::CABlockHash`.
struct HashlifeCache<'a> {
table: RefCell<CABlockCache<'a>>,
small_evolve_cache: [u8; 1<<16],
blank_cache: RefCell<Vec<RawBlock<'a>>>,
//placeholder_node: Node<'a>,
}
#[derive(Clone, Copy, Debug)]
pub struct Hashlife<'a>(&'a HashlifeCache<'a>);
#[derive(Clone, Copy, Debug)]
pub struct Block<'a> {
raw: RawBlock<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
#[derive(Clone, Copy, Debug)]
pub struct Node<'a> {
raw: RawNode<'a>,
hl: Hashlife<'a>,
lg_size: usize,
}
impl<'a> Drop for HashlifeCache<'a> {
fn drop(&mut self) {
self.blank_cache.get_mut().clear();
}
}
impl<'a> Hashlife<'a> {
/// Create a new Hashlife and pass it to a function. For explanation on why
/// this function calling convention is used see `CABlockCache::with_new`
pub fn with_new<F,T>(f: F) -> T
where F: for<'b> FnOnce(Hashlife<'b>) -> T {
CABlockCache::with_new(|bcache| {
//let placeholder_node = bcache.new_block([[Block::Leaf(0); 2]; 2]);
let hashlife_cache = HashlifeCache {
table: RefCell::new(bcache),
small_evolve_cache: evolve::mk_small_evolve_cache(),
blank_cache: RefCell::new(vec![RawBlock::Leaf(0)]),
//placeholder_node: placeholder_node,
};
let hashlife = unsafe {&*(&hashlife_cache as *const _)};
f(Hashlife(hashlife))
})
}
/// Create a new raw node with `elems` as corners
pub fn raw_node(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawNode<'a> {
self.block_cache().node(elems)
}
/// Creates a node with `elems` as corners. Panics if sizes don't match.
pub fn node(&self, elems: [[Block<'a>; 2]; 2]) -> Node<'a> {
let elem_lg_size = elems[0][0].lg_size();
make_2x2(|i, j| assert_eq!(elems[i][j].lg_size(), elem_lg_size,
"Sizes don't match in new node"));
let raw_elems = make_2x2(|i, j| elems[i][j].to_raw());
Node {
raw: self.raw_node(raw_elems),
hl: *self,
lg_size: elem_lg_size + 1,
}
}
/// Create a new block with `elems` as corners
pub fn raw_node_block(&self, elems: [[RawBlock<'a>; 2]; 2]) -> RawBlock<'a>
{
RawBlock::Node(self.raw_node(elems))
}
/// Creates a new block with `elems` as corners. Panics if sizes don't
/// match.
pub fn node_block(&self, elems: [[Block<'a>; 2]; 2]) -> Block<'a> {
Block::from_node(self.node(elems))
}
/// Creates leaf block
pub fn leaf(&self, leaf: Leaf) -> Block<'a> {
Block {
raw: RawBlock::Leaf(leaf),
hl: *self,
lg_size: LG_LEAF_SIZE,
}
}
/// Reference to underlying block cache (I don't remember why I made it
/// public)
pub fn block_cache(&self) -> RefMut<CABlockCache<'a>> {
self.0.table.borrow_mut()
}
/// Small block cache for `evolve`
pub fn small_evolve_cache(&self) -> &[u8; 1<<16] {
&self.0.small_evolve_cache
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the raw version of big stepping.
pub fn raw_evolve(&self, node: RawNode<'a>) -> RawBlock<'a> {
evolve::evolve(self, node, node.lg_size() - LG_LEAF_SIZE - 1)
}
/// Given 2^(n+1)x2^(n+1) node `node`, progress it 2^(n-1) generations and
/// return 2^nx2^n block in the center. This is the main component of the
/// Hashlife algorithm.
///
/// This is the normal version of big stepping.
pub fn big_step(&self, node: Node<'a>) -> Block<'a> {
Block {
raw: self.raw_evolve(node.to_raw()),
hl: *self,
lg_size: node.lg_size - 1,
}
}
/// Given 2^(n+1)x2^(n+1) block, return 2^nx2^n subblock that's y*2^(n-1)
/// south and x*2^(n-1) east of the north-west corner.
///
/// Public for use in other modules in this crate; don't rely on it.
pub fn raw_subblock(&self, node: RawNode<'a>, y: u8, x: u8) -> RawBlock<'a>
{
evolve::subblock(self, node, y, x)
}
/// Returns a raw blank block (all the cells are dead) with a given depth
pub fn raw_blank(&self, lg_size: usize) -> RawBlock<'a> {
let depth = lg_size - LG_LEAF_SIZE;
let mut blank_cache = self.0.blank_cache.borrow_mut();
if depth < blank_cache.len() {
blank_cache[depth]
} else {
let mut big_blank = *blank_cache.last().unwrap();
let repeats = depth + 1 - blank_cache.len();
for _ in 0..repeats {
big_blank = self.raw_node_block([[big_blank; 2]; 2]);
blank_cache.push(big_blank);
}
big_blank
}
}
/// Returns a blank block (all the cells are dead) with a given depth
pub fn blank(&self, lg_size: usize) -> Block<'a> {
Block {
raw: self.raw_blank(lg_size),
hl: *self,
lg_size: lg_size,
}
}
fn block_from_raw(&self, raw: RawBlock<'a>) -> Block<'a> {
Block {
raw: raw,
hl: *self,
lg_size: raw.lg_size_verified().unwrap(),
}
}
fn node_from_raw(&self, raw: RawNode<'a>) -> Node<'a> {
Node {
raw: raw,
hl: *self,
lg_size: RawBlock::Node(raw).lg_size_verified().unwrap(),
}
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn raw_step_pow2(&self, node: RawNode<'a>, lognsteps: usize) ->
RawBlock<'a> {
evolve::step_pow2(self, node, lognsteps)
}
/// Return sidelength 2^(n-1) block at the center of node after it evolved
/// for 2^lognsteps steps.
pub fn step_pow2(&self, node: Node<'a>, lognsteps: usize) -> Block<'a> {
assert!(lognsteps + 2 <= node.lg_size());
let raw_node = self.raw_step_pow2(node.to_raw(), lognsteps);
Block {
raw: raw_node,
hl: *self,
lg_size: node.lg_size() - 1
}
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step(&self, node: Node<'a>, nstep: u64) -> Block<'a> {
self.step_bigu(node, &BigUint::from_u64(nstep).unwrap())
}
/// Return sidelength 2^(n-1) block at the center of the node after it
/// evolved `nstep` steps. Requires `nstep < 2**(n-2)`.
pub fn step_bigu(&self, node: Node<'a>, nstep: &BigUint) -> Block<'a> {
assert!(*nstep < BigUint::one() << (node.lg_size() - 2));
let raw = evolve::step_u(self, node.to_raw(), node.lg_size() -
LG_LEAF_SIZE - 1, nstep);
Block {
raw: raw,
hl: *self,
lg_size: node.lg_size() - 1,
}
}
/// Return a block with all cells set randomly of size `2 ** lg_size`.
pub fn random_block<R:rand::Rng>(&self, rng: &mut R, lg_size: usize) -> Block<'a> {
if lg_size == LG_LEAF_SIZE {
let leaf = rng.gen::<Leaf>() & LEAF_MASK;
self.leaf(leaf)
} else {
self.node_block(make_2x2(|_,_| self.random_block(rng, lg_size-1)))
}
}
}
impl<'a> fmt::Debug for HashlifeCache<'a> {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "<Hashlife instance>")
}
}
impl<'a> Node<'a> {
pub fn to_raw(&self) -> RawNode<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn evolve(&self) -> Block<'a> {
self.hl.block_from_raw(self.hl.raw_evolve(self.raw))
}
pub fn corners(&self) -> [[Block<'a>; 2]; 2] {
make_2x2(|i, j| self.hl.block_from_raw(self.raw.corners()[i][j]))
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn node_of_leafs(&self) -> bool {
self.lg_size == 1
}
}
impl<'a> PartialEq for Node<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Node<'a> {}
impl<'a> Block<'a> {
pub fn to_raw(&self) -> RawBlock<'a> {
self.raw
}
pub fn hashlife_instance(&self) -> Hashlife<'a> {
self.hl
}
pub fn from_node(node: Node<'a>) -> Self {
Block {
raw: RawBlock::Node(node.raw),
hl: node.hl,
lg_size: node.lg_size,
}
}
pub fn destruct(self) -> Result<Node<'a>, Leaf> {
match self.raw {
RawBlock::Node(n) => Ok(self.hl.node_from_raw(n)),
RawBlock::Leaf(l) => Err(l),
}
}
pub fn unwrap_leaf(self) -> Leaf {
self.destruct().unwrap_err()
}
pub fn unwrap_node(self) -> Node<'a> {
self.destruct().unwrap()
}
pub fn lg_size(&self) -> usize {
self.lg_size
}
pub fn lg_size_verified(&self) -> Result<usize, ()> {
Ok(self.lg_size())
}
pub fn is_blank(&self) -> bool {
self.raw.is_blank()
}
}
impl<'a> PartialEq for Block<'a> {
fn eq(&self, other: &Self) -> bool {
self.raw == other.raw
}
}
impl<'a> Eq for Block<'a> {}
#[cfg(test)]
mod test {
use super::Hashlife;
use crate::leaf::LG_LEAF_SIZE;
use crate::block::Block;
#[test]
fn test_blank0() {
Hashlife::with_new(|hl| {
let blank3 = hl.raw_blank(5);
assert_eq!(blank3.lg_size(), 5);
let blank1 = hl.raw_blank(3);
let blank2 = hl.raw_blank(4);
assert_eq!(blank3.unwrap_node().corners(), &[[blank2; 2]; 2]);
assert_eq!(blank2.unwrap_node().corners(), &[[blank1; 2]; 2]);
});
}
#[test]
fn test_blank1() {
Hashlife::with_new(|hl| {
assert_eq!(hl.raw_blank(LG_LEAF_SIZE), Block::Leaf(0));
assert_eq!(hl.raw_blank(4).lg_size(), 4);
assert_eq!(hl.raw_blank(5).lg_size(), 5);
});
}
}
| fmt | identifier_name |
server_cgi.go | /*
* Copyright (c) 2015, Shinya Yagyu
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package cgi
import (
"math"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/net/websocket"
"github.com/shingetsu-gou/go-nat"
"github.com/shingetsu-gou/http-relay"
"github.com/shingetsu-gou/shingetsu-gou/node"
"github.com/shingetsu-gou/shingetsu-gou/thread"
"github.com/shingetsu-gou/shingetsu-gou/util"
)
//ServerURL is the url to server.cgi
const ServerURL = "/server.cgi"
//ServerSetup setups handlers for server.cgi
func ServerSetup(s *LoggingServeMux, relaynum int) |
//doRelay relays url to websocket.
//e.g. accept http://relay.com:8000/server.cgi/relay/client.com:8000/server.cgi/join/other.com:8000+server.cgi
func doRelay(w http.ResponseWriter, r *http.Request) {
reg := regexp.MustCompile("^" + ServerURL + `/relay/(([^/]+)/[^/]*.*)`)
m := reg.FindStringSubmatch(r.URL.Path)
if m == nil || len(m) < 3 {
log.Println("invalid path", r.URL.Path)
return
}
backup := r.URL
var err error
r.URL, err = url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
host, port, err := net.SplitHostPort(m[2])
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
relay.HandleServer(host, w, r, func(res *relay.ResponseWriter) bool {
return true
})
r.URL = backup
}
//doProxy does proxy,
//e.g. accept http://relay.com:8000/server.cgi/proxy/other.com:8000/server.cgi/join/client.com:8000+server.cgi
//and proxy to http://other.com:8000/server.cgi/join/client.com:8000+server.cgi
//path format of proxy url must be /*/*/[join|bye|update]/* not to be abused.
func doProxy(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
if !relay.IsAccepted(host) {
log.Println(host, "is not accepted")
return
}
reg := regexp.MustCompile("^" + ServerURL + "/proxy/(.*)$")
m := reg.FindStringSubmatch(r.URL.Path)
if len(m) < 2 {
log.Println("invalid path", r.URL.Path)
return
}
u, err := url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
if !validPath(u.Path) {
log.Println("invalid path", u.Path)
return
}
rp := &httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL = u
},
}
rp.ServeHTTP(w, r)
}
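//validPath reports whether path is a server.cgi join/bye/update command path,
//limiting what doProxy will forward.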
func validPath(path string) bool {
cmd := strings.Split(path, "/")
switch {
case len(cmd) > 2 && (cmd[2] == "bye" || cmd[2] == "join"):
case len(cmd) > 5 && cmd[2] == "update":
default:
return false
}
return true
}
//websocketRelay accepts websocket relay.
//e.g. accept url http://relay.com:8000/server.cgi/request_relay/
func websocketRelay(relaynum int) func(*websocket.Conn) {
return func(ws *websocket.Conn) {
if n := relay.Count(); int(n) >= relaynum {
log.Println("num of relays", n, "is over", relaynum)
return
}
host, port, err := net.SplitHostPort(ws.Request().RemoteAddr)
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
log.Println("websocket client:", host, port)
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
log.Println(host, "is accepted.")
relay.StartServe(host, ws)
}
}
//doPing just responds PONG with the remote addr.
func doPing(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
fmt.Fprint(w, "PONG\n"+host+"\n")
}
//doNode returns one of nodelist. if nodelist.len=0 returns one of initNode.
func doNode(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
if s.NodeManager.ListLen() > 0 {
fmt.Fprintln(w, s.NodeManager.GetNodestrSliceInTable("")[0])
} else {
fmt.Fprintln(w, s.InitNode.GetData()[0])
}
}
//doJoin adds node specified in url to searchlist and nodelist.
//if nodelist length exceeds #defaultnode, removes one node from nodelist, says bye to it, and returns WELCOME with its ip:port.
func doJoin(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("join")
host = s.remoteIP(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
return
}
if _, err := n.Ping(); err != nil {
return
}
suggest := s.NodeManager.ReplaceNodeInList(n)
if suggest == nil {
fmt.Fprintln(s.wr, "WELCOME")
return
}
fmt.Fprintf(s.wr, "WELCOME\n%s\n", suggest.Nodestr)
}
//doBye removes from nodelist and says bye to the node specified in url.
func doBye(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("bye")
host = s.checkRemote(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err == nil {
s.NodeManager.RemoveFromList(n)
}
fmt.Fprintln(s.wr, "BYEBYE")
}
//doHave checks existence of the cache whose name is specified in url.
func doHave(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^have/([0-9A-Za-z_]+)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
fmt.Fprintln(w, "NO")
log.Println("illegal url")
return
}
ca := thread.NewCache(m[1])
if ca.HasRecord() {
fmt.Fprintln(w, "YES")
} else {
fmt.Fprintln(w, "NO")
}
}
//doUpdate adds remote node to searchlist and lookuptable with datfile specified in url.
//if stamp is in range of defaultUpdateRange adds to updateque.
func doUpdate(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
log.Println("failed to create cgi struct")
return
}
reg := regexp.MustCompile(`^update/(\w+)/(\d+)/(\w+)/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
datfile, stamp, id, hostport, path := m[1], m[2], m[3], m[4], m[5]
host, portstr, err := net.SplitHostPort(hostport)
if err != nil {
log.Println(err)
return
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return
}
host = s.remoteIP(host)
if host == "" {
log.Println("host is null")
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
log.Println("detects spam")
return
}
s.NodeManager.AppendToTable(datfile, n)
s.NodeManager.Sync()
nstamp, err := strconv.ParseInt(stamp, 10, 64)
if err != nil {
log.Println(err)
return
}
if !thread.IsInUpdateRange(nstamp) {
return
}
rec := thread.NewRecord(datfile, stamp+"_"+id)
go s.UpdateQue.UpdateNodes(rec, n)
fmt.Fprintln(w, "OK")
}
//doRecent renders records whose timestamp is in range of one specified in url.
func doRecent(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^recent/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
stamp := m[1]
last := time.Now().Unix() + s.RecentRange
begin, end, _ := s.parseStamp(stamp, last)
for _, i := range s.RecentList.GetRecords() {
if begin > i.Stamp || i.Stamp > end {
continue
}
ca := thread.NewCache(i.Datfile)
cont := fmt.Sprintf("%d<>%s<>%s", i.Stamp, i.ID, i.Datfile)
if ca.LenTags() > 0 {
cont += "<>tag:" + ca.TagString()
}
_, err := fmt.Fprintf(w, "%s\n", cont)
if err != nil {
log.Println(err)
}
}
}
//doMotd simply renders motd file.
func doMotd(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
f, err := ioutil.ReadFile(s.Motd)
if err != nil {
log.Println(err)
return
}
fmt.Fprintf(w, string(f))
}
//doGetHead renders record contents (get) or id+timestamp (head) for records that have the id and
// whose stamp is in the range specified by the url.
func doGetHead(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^(get|head|removed)/([0-9A-Za-z_]+)/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url", s.path())
return
}
method, datfile, stamp := m[1], m[2], m[3]
ca := thread.NewCache(datfile)
begin, end, id := s.parseStamp(stamp, math.MaxInt32)
var recs thread.RecordMap
if method == "removed" {
recs = ca.LoadRemovedRecords()
} else {
recs = ca.LoadRecords()
}
for _, r := range recs {
if r.InRange(begin, end, id) {
if method == "get" {
if err := r.Load(); err != nil {
log.Println(err)
continue
}
fmt.Fprintln(s.wr, r.Recstr())
continue
}
fmt.Fprintln(s.wr, strings.Replace(r.Idstr(), "_", "<>", -1))
}
}
if method == "get" {
thread.UpdatedRecord.Inform(datfile, id, begin, end)
}
}
//ServerCfg is config for serverCGI struct.
//must set beforehand.
var ServerCfg *ServerConfig
//ServerConfig is config for serverCGI struct.
type ServerConfig struct {
RecentRange int64
Motd string
NodeManager *node.Manager
InitNode *util.ConfList
UpdateQue *thread.UpdateQue
RecentList *thread.RecentList
}
//serverCGI is for server.cgi handler.
type serverCGI struct {
*ServerConfig
*cgi
}
//newServerCGI sets content-type to text and returns a serverCGI obj.
func newServerCGI(w http.ResponseWriter, r *http.Request) (serverCGI, error) {
if ServerCfg == nil {
log.Fatal("must set ServerCfg")
}
c := serverCGI{
ServerConfig: ServerCfg,
cgi: newCGI(w, r),
}
if c.cgi == nil {
return c, errors.New("cannot make CGI")
}
if w != nil {
w.Header().Set("Content-Type", "text/plain")
}
return c, nil
}
//remoteIP returns host if host!=""
//else returns remoteaddr
func (s *serverCGI) remoteIP(host string) string {
if host != "" {
return host
}
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if !isGlobal(remoteAddr) {
log.Println(remoteAddr, "is local IP")
return ""
}
return remoteAddr
}
func isGlobal(remoteAddr string) bool {
ip := net.ParseIP(remoteAddr)
if ip == nil {
log.Println(remoteAddr,"has illegal format")
return false
}
return nat.IsGlobalIP(ip) != ""
}
//checkRemote returns remoteaddr
//if host is specified returns remoteaddr if host==remoteaddr.
func (s *serverCGI) checkRemote(host string) string {
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if host == "" {
return remoteAddr
}
ipaddr, err := net.LookupIP(host)
if err != nil {
log.Println(err)
return ""
}
for _, ipa := range ipaddr {
if ipa.String() == remoteAddr {
return remoteAddr
}
}
return ""
}
//extractHost extracts and returns host, path, and port from /method/ip:port.
func (s *serverCGI) extractHost(method string) (string, string, int) {
reg := regexp.MustCompile("^" + method + `/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return "", "", 0
}
path := m[2]
host, portstr, err := net.SplitHostPort(m[1])
if err != nil {
log.Println(err)
return "", "", 0
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return "", "", 0
}
return host, path, port
}
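// Illustrative note (not in the original source): given the regexp above, a path such as
// "join/1.2.3.4:8000+server.cgi" should yield host "1.2.3.4", path "+server.cgi", and port 8000;
// anything without a parsable host:port part falls back to ("", "", 0).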
//parseStamp parses format beginStamp - endStamp/id and returns them.
//if endStamp is not specified returns last as endStamp.
func (s *serverCGI) parseStamp(stamp string, last int64) (int64, int64, string) {
buf := strings.Split(stamp, "/")
var id string
if len(buf) > 1 {
id = buf[1]
stamp = buf[0]
}
buf = strings.Split(stamp, "-")
nstamps := make([]int64, len(buf))
var err error
for i, nstamp := range buf {
if nstamp == "" {
continue
}
nstamps[i], err = strconv.ParseInt(nstamp, 10, 64)
if err != nil {
return 0, 0, ""
}
}
switch {
case stamp == "", stamp == "-":
return 0, last, id
case strings.HasSuffix(stamp, "-"):
return nstamps[0], last, id
case len(buf) == 1:
return nstamps[0], nstamps[0], id
case buf[0] == "":
return 0, nstamps[1], id
default:
return nstamps[0], nstamps[1], id
}
}
| {
s.RegistCompressHandler(ServerURL+"/ping", doPing)
s.RegistCompressHandler(ServerURL+"/node", doNode)
s.RegistCompressHandler(ServerURL+"/join/", doJoin)
s.RegistCompressHandler(ServerURL+"/bye/", doBye)
s.RegistCompressHandler(ServerURL+"/have/", doHave)
s.RegistCompressHandler(ServerURL+"/removed/", doGetHead)
s.RegistCompressHandler(ServerURL+"/get/", doGetHead)
s.RegistCompressHandler(ServerURL+"/head/", doGetHead)
s.RegistCompressHandler(ServerURL+"/update/", doUpdate)
s.RegistCompressHandler(ServerURL+"/recent/", doRecent)
s.RegistCompressHandler(ServerURL+"/", doMotd)
if relaynum > 0 {
s.HandleFunc(ServerURL+"/proxy/", doProxy)
s.HandleFunc(ServerURL+"/relay/", doRelay)
s.Handle(ServerURL+"/request_relay/", websocket.Handler(websocketRelay(relaynum)))
}
} | identifier_body |
server_cgi.go | /*
* Copyright (c) 2015, Shinya Yagyu
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package cgi
import (
"math"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/net/websocket"
"github.com/shingetsu-gou/go-nat"
"github.com/shingetsu-gou/http-relay"
"github.com/shingetsu-gou/shingetsu-gou/node"
"github.com/shingetsu-gou/shingetsu-gou/thread"
"github.com/shingetsu-gou/shingetsu-gou/util"
)
//ServerURL is the url to server.cgi
const ServerURL = "/server.cgi"
//ServerSetup sets up handlers for server.cgi
func ServerSetup(s *LoggingServeMux, relaynum int) {
s.RegistCompressHandler(ServerURL+"/ping", doPing)
s.RegistCompressHandler(ServerURL+"/node", doNode)
s.RegistCompressHandler(ServerURL+"/join/", doJoin)
s.RegistCompressHandler(ServerURL+"/bye/", doBye)
s.RegistCompressHandler(ServerURL+"/have/", doHave)
s.RegistCompressHandler(ServerURL+"/removed/", doGetHead)
s.RegistCompressHandler(ServerURL+"/get/", doGetHead)
s.RegistCompressHandler(ServerURL+"/head/", doGetHead)
s.RegistCompressHandler(ServerURL+"/update/", doUpdate)
s.RegistCompressHandler(ServerURL+"/recent/", doRecent)
s.RegistCompressHandler(ServerURL+"/", doMotd)
if relaynum > 0 {
s.HandleFunc(ServerURL+"/proxy/", doProxy)
s.HandleFunc(ServerURL+"/relay/", doRelay)
s.Handle(ServerURL+"/request_relay/", websocket.Handler(websocketRelay(relaynum)))
}
}
//doRelay relays url to websocket.
//e.g. accept http://relay.com:8000/server.cgi/relay/client.com:8000/server.cgi/join/other.com:8000+server.cgi
func doRelay(w http.ResponseWriter, r *http.Request) {
reg := regexp.MustCompile("^" + ServerURL + `/relay/(([^/]+)/[^/]*.*)`)
m := reg.FindStringSubmatch(r.URL.Path)
if m == nil || len(m) < 3 {
log.Println("invalid path", r.URL.Path)
return
}
backup := r.URL
var err error
r.URL, err = url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
host, port, err := net.SplitHostPort(m[2])
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
relay.HandleServer(host, w, r, func(res *relay.ResponseWriter) bool {
return true
})
r.URL = backup
}
//doProxy proxies the request,
//e.g. accept http://relay.com:8000/server.cgi/proxy/other.com:8000/server.cgi/join/client.com:8000+server.cgi
//and proxy to http://other.com:8000/server.cgi/join/client.com:8000+server.cgi
//path format of proxy url must be /*/*/[join|bye|update]/* not to be abused.
func doProxy(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
if !relay.IsAccepted(host) {
log.Println(host, "is not accepted")
return
}
reg := regexp.MustCompile("^" + ServerURL + "/proxy/(.*)$")
m := reg.FindStringSubmatch(r.URL.Path)
if len(m) < 2 {
log.Println("invalid path", r.URL.Path)
return
}
u, err := url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
if !validPath(u.Path) {
log.Println("invalid path", u.Path)
return
}
rp := &httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL = u
},
}
rp.ServeHTTP(w, r)
}
func validPath(path string) bool {
cmd := strings.Split(path, "/")
switch {
case len(cmd) > 2 && (cmd[2] == "bye" || cmd[2] == "join"):
case len(cmd) > 5 && cmd[2] == "update":
default:
return false
}
return true
}
//websocketRelay accepts websocket relay.
//e.g. accept url http://relay.com:8000/server.cgi/request_relay/
func websocketRelay(relaynum int) func(*websocket.Conn) {
return func(ws *websocket.Conn) {
if n := relay.Count(); int(n) >= relaynum {
log.Println("num of relays", n, "is over", relaynum)
return
}
host, port, err := net.SplitHostPort(ws.Request().RemoteAddr)
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
log.Println("websocket client:", host, port)
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
log.Println(host, "is accepted.")
relay.StartServe(host, ws)
}
}
//doPing just responds PONG with the remote addr.
func doPing(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
fmt.Fprint(w, "PONG\n"+host+"\n")
}
//doNode returns one of nodelist. if nodelist.len=0 returns one of initNode.
func doNode(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
if s.NodeManager.ListLen() > 0 {
fmt.Fprintln(w, s.NodeManager.GetNodestrSliceInTable("")[0])
} else {
fmt.Fprintln(w, s.InitNode.GetData()[0])
}
}
//doJoin adds node specified in url to searchlist and nodelist.
//if nodelist length exceeds #defaultnode, removes one node from nodelist, says bye to it, and returns WELCOME with its ip:port.
func doJoin(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("join")
host = s.remoteIP(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
return
}
if _, err := n.Ping(); err != nil {
return
}
suggest := s.NodeManager.ReplaceNodeInList(n)
if suggest == nil {
fmt.Fprintln(s.wr, "WELCOME")
return
}
fmt.Fprintf(s.wr, "WELCOME\n%s\n", suggest.Nodestr)
}
//doBye removes from nodelist and says bye to the node specified in url.
func doBye(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("bye")
host = s.checkRemote(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err == nil {
s.NodeManager.RemoveFromList(n)
}
fmt.Fprintln(s.wr, "BYEBYE")
}
//doHave checks existence of the cache whose name is specified in url.
func doHave(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^have/([0-9A-Za-z_]+)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
fmt.Fprintln(w, "NO")
log.Println("illegal url")
return
}
ca := thread.NewCache(m[1])
if ca.HasRecord() {
fmt.Fprintln(w, "YES")
} else {
fmt.Fprintln(w, "NO")
}
}
//doUpdate adds remote node to searchlist and lookuptable with datfile specified in url.
//if stamp is in range of defaultUpdateRange adds to updateque.
func doUpdate(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
log.Println("failed to create cgi struct")
return
}
reg := regexp.MustCompile(`^update/(\w+)/(\d+)/(\w+)/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
datfile, stamp, id, hostport, path := m[1], m[2], m[3], m[4], m[5]
host, portstr, err := net.SplitHostPort(hostport)
if err != nil {
log.Println(err)
return
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return
}
host = s.remoteIP(host)
if host == "" {
log.Println("host is null")
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
log.Println("detects spam")
return
}
s.NodeManager.AppendToTable(datfile, n)
s.NodeManager.Sync()
nstamp, err := strconv.ParseInt(stamp, 10, 64)
if err != nil {
log.Println(err)
return
}
if !thread.IsInUpdateRange(nstamp) {
return
}
rec := thread.NewRecord(datfile, stamp+"_"+id)
go s.UpdateQue.UpdateNodes(rec, n)
fmt.Fprintln(w, "OK")
}
//doRecent renders records whose timestamp is in range of one specified in url.
func doRecent(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^recent/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
stamp := m[1]
last := time.Now().Unix() + s.RecentRange
begin, end, _ := s.parseStamp(stamp, last)
for _, i := range s.RecentList.GetRecords() {
if begin > i.Stamp || i.Stamp > end {
continue
}
ca := thread.NewCache(i.Datfile)
cont := fmt.Sprintf("%d<>%s<>%s", i.Stamp, i.ID, i.Datfile)
if ca.LenTags() > 0 {
cont += "<>tag:" + ca.TagString()
}
_, err := fmt.Fprintf(w, "%s\n", cont)
if err != nil {
log.Println(err)
}
}
}
//doMotd simply renders motd file.
func doMotd(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
f, err := ioutil.ReadFile(s.Motd)
if err != nil {
log.Println(err)
return
}
fmt.Fprintf(w, string(f))
}
//doGetHead renders record contents (get) or id+timestamp (head) for records that have the id and
// whose stamp is in the range specified by the url.
func doGetHead(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^(get|head|removed)/([0-9A-Za-z_]+)/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url", s.path())
return
}
method, datfile, stamp := m[1], m[2], m[3]
ca := thread.NewCache(datfile)
begin, end, id := s.parseStamp(stamp, math.MaxInt32)
var recs thread.RecordMap
if method == "removed" {
recs = ca.LoadRemovedRecords()
} else {
recs = ca.LoadRecords()
}
for _, r := range recs {
if r.InRange(begin, end, id) {
if method == "get" {
if err := r.Load(); err != nil {
log.Println(err)
continue
}
fmt.Fprintln(s.wr, r.Recstr())
continue
}
fmt.Fprintln(s.wr, strings.Replace(r.Idstr(), "_", "<>", -1))
}
}
if method == "get" {
thread.UpdatedRecord.Inform(datfile, id, begin, end)
}
}
//ServerCfg is config for serverCGI struct.
//must set beforehand.
var ServerCfg *ServerConfig
//ServerConfig is config for serverCGI struct.
type ServerConfig struct {
RecentRange int64
Motd string
NodeManager *node.Manager
InitNode *util.ConfList
UpdateQue *thread.UpdateQue
RecentList *thread.RecentList
}
//serverCGI is for server.cgi handler.
type serverCGI struct {
*ServerConfig
*cgi
}
//newServerCGI sets content-type to text and returns a serverCGI obj.
func newServerCGI(w http.ResponseWriter, r *http.Request) (serverCGI, error) {
if ServerCfg == nil {
log.Fatal("must set ServerCfg")
}
c := serverCGI{
ServerConfig: ServerCfg,
cgi: newCGI(w, r),
}
if c.cgi == nil {
return c, errors.New("cannot make CGI")
}
if w != nil {
w.Header().Set("Content-Type", "text/plain")
}
return c, nil
}
//remoteIP returns host if host!=""
//else returns remoteaddr
func (s *serverCGI) remoteIP(host string) string {
if host != "" {
return host
}
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if !isGlobal(remoteAddr) {
log.Println(remoteAddr, "is local IP")
return ""
}
return remoteAddr
}
func isGlobal(remoteAddr string) bool {
ip := net.ParseIP(remoteAddr)
if ip == nil {
log.Println(remoteAddr,"has illegal format")
return false
}
return nat.IsGlobalIP(ip) != ""
}
//checkRemote returns remoteaddr
//if host is specified returns remoteaddr if host==remoteaddr.
func (s *serverCGI) checkRemote(host string) string {
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if host == "" {
return remoteAddr
}
ipaddr, err := net.LookupIP(host)
if err != nil {
log.Println(err)
return ""
}
for _, ipa := range ipaddr {
if ipa.String() == remoteAddr {
return remoteAddr
}
}
return ""
}
//extractHost extracts and returns host, path, and port from /method/ip:port.
func (s *serverCGI) extractHost(method string) (string, string, int) {
reg := regexp.MustCompile("^" + method + `/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return "", "", 0
}
path := m[2]
host, portstr, err := net.SplitHostPort(m[1])
if err != nil {
log.Println(err)
return "", "", 0
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return "", "", 0
}
return host, path, port
}
//parseStamp parses format beginStamp - endStamp/id and returns them.
//if endStamp is not specified returns last as endStamp.
func (s *serverCGI) | (stamp string, last int64) (int64, int64, string) {
buf := strings.Split(stamp, "/")
var id string
if len(buf) > 1 {
id = buf[1]
stamp = buf[0]
}
buf = strings.Split(stamp, "-")
nstamps := make([]int64, len(buf))
var err error
for i, nstamp := range buf {
if nstamp == "" {
continue
}
nstamps[i], err = strconv.ParseInt(nstamp, 10, 64)
if err != nil {
return 0, 0, ""
}
}
switch {
case stamp == "", stamp == "-":
return 0, last, id
case strings.HasSuffix(stamp, "-"):
return nstamps[0], last, id
case len(buf) == 1:
return nstamps[0], nstamps[0], id
case buf[0] == "":
return 0, nstamps[1], id
default:
return nstamps[0], nstamps[1], id
}
}
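// Illustrative examples (added for clarity, not part of the original source):
// parseStamp("100-200/abc", 500) -> (100, 200, "abc")
// parseStamp("100-", 500)        -> (100, 500, "")
// parseStamp("-200", 500)        -> (0, 200, "")
// parseStamp("", 500)            -> (0, 500, "")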
| parseStamp | identifier_name |
server_cgi.go | /*
* Copyright (c) 2015, Shinya Yagyu
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package cgi
import (
"math"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/net/websocket"
"github.com/shingetsu-gou/go-nat"
"github.com/shingetsu-gou/http-relay"
"github.com/shingetsu-gou/shingetsu-gou/node"
"github.com/shingetsu-gou/shingetsu-gou/thread"
"github.com/shingetsu-gou/shingetsu-gou/util"
)
//ServerURL is the url to server.cgi
const ServerURL = "/server.cgi"
//ServerSetup sets up handlers for server.cgi
func ServerSetup(s *LoggingServeMux, relaynum int) {
s.RegistCompressHandler(ServerURL+"/ping", doPing)
s.RegistCompressHandler(ServerURL+"/node", doNode)
s.RegistCompressHandler(ServerURL+"/join/", doJoin)
s.RegistCompressHandler(ServerURL+"/bye/", doBye)
s.RegistCompressHandler(ServerURL+"/have/", doHave)
s.RegistCompressHandler(ServerURL+"/removed/", doGetHead)
s.RegistCompressHandler(ServerURL+"/get/", doGetHead)
s.RegistCompressHandler(ServerURL+"/head/", doGetHead)
s.RegistCompressHandler(ServerURL+"/update/", doUpdate)
s.RegistCompressHandler(ServerURL+"/recent/", doRecent)
s.RegistCompressHandler(ServerURL+"/", doMotd)
if relaynum > 0 {
s.HandleFunc(ServerURL+"/proxy/", doProxy)
s.HandleFunc(ServerURL+"/relay/", doRelay)
s.Handle(ServerURL+"/request_relay/", websocket.Handler(websocketRelay(relaynum)))
}
}
//doRelay relays url to websocket.
//e.g. accept http://relay.com:8000/server.cgi/relay/client.com:8000/server.cgi/join/other.com:8000+server.cgi
func doRelay(w http.ResponseWriter, r *http.Request) {
reg := regexp.MustCompile("^" + ServerURL + `/relay/(([^/]+)/[^/]*.*)`)
m := reg.FindStringSubmatch(r.URL.Path)
if m == nil || len(m) < 3 {
log.Println("invalid path", r.URL.Path)
return
}
backup := r.URL
var err error
r.URL, err = url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
host, port, err := net.SplitHostPort(m[2])
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
relay.HandleServer(host, w, r, func(res *relay.ResponseWriter) bool {
return true
})
r.URL = backup
}
//doProxy proxies the request,
//e.g. accept http://relay.com:8000/server.cgi/proxy/other.com:8000/server.cgi/join/client.com:8000+server.cgi
//and proxy to http://other.com:8000/server.cgi/join/client.com:8000+server.cgi
//path format of proxy url must be /*/*/[join|bye|update]/* not to be abused.
func doProxy(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
if !relay.IsAccepted(host) {
log.Println(host, "is not accepted")
return
}
reg := regexp.MustCompile("^" + ServerURL + "/proxy/(.*)$")
m := reg.FindStringSubmatch(r.URL.Path)
if len(m) < 2 {
log.Println("invalid path", r.URL.Path)
return
}
u, err := url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
if !validPath(u.Path) {
log.Println("invalid path", u.Path)
return
}
rp := &httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL = u
},
}
rp.ServeHTTP(w, r)
}
func validPath(path string) bool {
cmd := strings.Split(path, "/")
switch {
case len(cmd) > 2 && (cmd[2] == "bye" || cmd[2] == "join"):
case len(cmd) > 5 && cmd[2] == "update":
default:
return false
}
return true
}
//websocketRelay accepts websocket relay.
//e.g. accept url http://relay.com:8000/server.cgi/request_relay/
func websocketRelay(relaynum int) func(*websocket.Conn) {
return func(ws *websocket.Conn) {
if n := relay.Count(); int(n) >= relaynum |
host, port, err := net.SplitHostPort(ws.Request().RemoteAddr)
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
log.Println("websocket client:", host, port)
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
log.Println(host, "is accepted.")
relay.StartServe(host, ws)
}
}
//doPing just responds PONG with the remote addr.
func doPing(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
fmt.Fprint(w, "PONG\n"+host+"\n")
}
//doNode returns one of nodelist. if nodelist.len=0 returns one of initNode.
func doNode(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
if s.NodeManager.ListLen() > 0 {
fmt.Fprintln(w, s.NodeManager.GetNodestrSliceInTable("")[0])
} else {
fmt.Fprintln(w, s.InitNode.GetData()[0])
}
}
//doJoin adds node specified in url to searchlist and nodelist.
//if nodelist length exceeds #defaultnode, removes one node from nodelist, says bye to it, and returns WELCOME with its ip:port.
func doJoin(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("join")
host = s.remoteIP(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
return
}
if _, err := n.Ping(); err != nil {
return
}
suggest := s.NodeManager.ReplaceNodeInList(n)
if suggest == nil {
fmt.Fprintln(s.wr, "WELCOME")
return
}
fmt.Fprintf(s.wr, "WELCOME\n%s\n", suggest.Nodestr)
}
//doBye removes from nodelist and says bye to the node specified in url.
func doBye(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("bye")
host = s.checkRemote(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err == nil {
s.NodeManager.RemoveFromList(n)
}
fmt.Fprintln(s.wr, "BYEBYE")
}
//doHave checks existence of the cache whose name is specified in url.
func doHave(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^have/([0-9A-Za-z_]+)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
fmt.Fprintln(w, "NO")
log.Println("illegal url")
return
}
ca := thread.NewCache(m[1])
if ca.HasRecord() {
fmt.Fprintln(w, "YES")
} else {
fmt.Fprintln(w, "NO")
}
}
//doUpdate adds remote node to searchlist and lookuptable with datfile specified in url.
//if stamp is in range of defaultUpdateRange adds to updateque.
func doUpdate(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
log.Println("failed to create cgi struct")
return
}
reg := regexp.MustCompile(`^update/(\w+)/(\d+)/(\w+)/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
datfile, stamp, id, hostport, path := m[1], m[2], m[3], m[4], m[5]
host, portstr, err := net.SplitHostPort(hostport)
if err != nil {
log.Println(err)
return
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return
}
host = s.remoteIP(host)
if host == "" {
log.Println("host is null")
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
log.Println("detects spam")
return
}
s.NodeManager.AppendToTable(datfile, n)
s.NodeManager.Sync()
nstamp, err := strconv.ParseInt(stamp, 10, 64)
if err != nil {
log.Println(err)
return
}
if !thread.IsInUpdateRange(nstamp) {
return
}
rec := thread.NewRecord(datfile, stamp+"_"+id)
go s.UpdateQue.UpdateNodes(rec, n)
fmt.Fprintln(w, "OK")
}
//doRecent renders records whose timestamp is in range of one specified in url.
func doRecent(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^recent/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
stamp := m[1]
last := time.Now().Unix() + s.RecentRange
begin, end, _ := s.parseStamp(stamp, last)
for _, i := range s.RecentList.GetRecords() {
if begin > i.Stamp || i.Stamp > end {
continue
}
ca := thread.NewCache(i.Datfile)
cont := fmt.Sprintf("%d<>%s<>%s", i.Stamp, i.ID, i.Datfile)
if ca.LenTags() > 0 {
cont += "<>tag:" + ca.TagString()
}
_, err := fmt.Fprintf(w, "%s\n", cont)
if err != nil {
log.Println(err)
}
}
}
//doMotd simply renders motd file.
func doMotd(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
f, err := ioutil.ReadFile(s.Motd)
if err != nil {
log.Println(err)
return
}
fmt.Fprintf(w, string(f))
}
//doGetHead renders record contents (get) or id+timestamp (head) for records that have the id and
// whose stamp is in the range specified by the url.
func doGetHead(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^(get|head|removed)/([0-9A-Za-z_]+)/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url", s.path())
return
}
method, datfile, stamp := m[1], m[2], m[3]
ca := thread.NewCache(datfile)
begin, end, id := s.parseStamp(stamp, math.MaxInt32)
var recs thread.RecordMap
if method == "removed" {
recs = ca.LoadRemovedRecords()
} else {
recs = ca.LoadRecords()
}
for _, r := range recs {
if r.InRange(begin, end, id) {
if method == "get" {
if err := r.Load(); err != nil {
log.Println(err)
continue
}
fmt.Fprintln(s.wr, r.Recstr())
continue
}
fmt.Fprintln(s.wr, strings.Replace(r.Idstr(), "_", "<>", -1))
}
}
if method == "get" {
thread.UpdatedRecord.Inform(datfile, id, begin, end)
}
}
//ServerCfg is config for serverCGI struct.
//must set beforehand.
var ServerCfg *ServerConfig
//ServerConfig is config for serverCGI struct.
type ServerConfig struct {
RecentRange int64
Motd string
NodeManager *node.Manager
InitNode *util.ConfList
UpdateQue *thread.UpdateQue
RecentList *thread.RecentList
}
//serverCGI is for server.cgi handler.
type serverCGI struct {
*ServerConfig
*cgi
}
//newServerCGI sets content-type to text and returns a serverCGI obj.
func newServerCGI(w http.ResponseWriter, r *http.Request) (serverCGI, error) {
if ServerCfg == nil {
log.Fatal("must set ServerCfg")
}
c := serverCGI{
ServerConfig: ServerCfg,
cgi: newCGI(w, r),
}
if c.cgi == nil {
return c, errors.New("cannot make CGI")
}
if w != nil {
w.Header().Set("Content-Type", "text/plain")
}
return c, nil
}
//remoteIP returns host if host!=""
//else returns remoteaddr
func (s *serverCGI) remoteIP(host string) string {
if host != "" {
return host
}
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if !isGlobal(remoteAddr) {
log.Println(remoteAddr, "is local IP")
return ""
}
return remoteAddr
}
func isGlobal(remoteAddr string) bool {
ip := net.ParseIP(remoteAddr)
if ip == nil {
log.Println(remoteAddr,"has illegal format")
return false
}
return nat.IsGlobalIP(ip) != ""
}
//checkRemote returns remoteaddr
//if host is specified returns remoteaddr if host==remoteaddr.
func (s *serverCGI) checkRemote(host string) string {
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if host == "" {
return remoteAddr
}
ipaddr, err := net.LookupIP(host)
if err != nil {
log.Println(err)
return ""
}
for _, ipa := range ipaddr {
if ipa.String() == remoteAddr {
return remoteAddr
}
}
return ""
}
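// Illustrative note (not in the original source): checkRemote("") simply returns the request's
// remote address, while checkRemote("example.org") returns it only when one of the looked-up
// IPs for the hypothetical host "example.org" matches that address exactly.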
//extractHost extracts and returns host, path, and port from /method/ip:port.
func (s *serverCGI) extractHost(method string) (string, string, int) {
reg := regexp.MustCompile("^" + method + `/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return "", "", 0
}
path := m[2]
host, portstr, err := net.SplitHostPort(m[1])
if err != nil {
log.Println(err)
return "", "", 0
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return "", "", 0
}
return host, path, port
}
//parseStamp parses format beginStamp - endStamp/id and returns them.
//if endStamp is not specified returns last as endStamp.
func (s *serverCGI) parseStamp(stamp string, last int64) (int64, int64, string) {
buf := strings.Split(stamp, "/")
var id string
if len(buf) > 1 {
id = buf[1]
stamp = buf[0]
}
buf = strings.Split(stamp, "-")
nstamps := make([]int64, len(buf))
var err error
for i, nstamp := range buf {
if nstamp == "" {
continue
}
nstamps[i], err = strconv.ParseInt(nstamp, 10, 64)
if err != nil {
return 0, 0, ""
}
}
switch {
case stamp == "", stamp == "-":
return 0, last, id
case strings.HasSuffix(stamp, "-"):
return nstamps[0], last, id
case len(buf) == 1:
return nstamps[0], nstamps[0], id
case buf[0] == "":
return 0, nstamps[1], id
default:
return nstamps[0], nstamps[1], id
}
}
| {
log.Println("num of relays", n, "is over", relaynum)
return
} | conditional_block |
server_cgi.go | /*
* Copyright (c) 2015, Shinya Yagyu
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | * POSSIBILITY OF SUCH DAMAGE.
*/
package cgi
import (
"math"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/net/websocket"
"github.com/shingetsu-gou/go-nat"
"github.com/shingetsu-gou/http-relay"
"github.com/shingetsu-gou/shingetsu-gou/node"
"github.com/shingetsu-gou/shingetsu-gou/thread"
"github.com/shingetsu-gou/shingetsu-gou/util"
)
//ServerURL is the url to server.cgi
const ServerURL = "/server.cgi"
//ServerSetup sets up handlers for server.cgi
func ServerSetup(s *LoggingServeMux, relaynum int) {
s.RegistCompressHandler(ServerURL+"/ping", doPing)
s.RegistCompressHandler(ServerURL+"/node", doNode)
s.RegistCompressHandler(ServerURL+"/join/", doJoin)
s.RegistCompressHandler(ServerURL+"/bye/", doBye)
s.RegistCompressHandler(ServerURL+"/have/", doHave)
s.RegistCompressHandler(ServerURL+"/removed/", doGetHead)
s.RegistCompressHandler(ServerURL+"/get/", doGetHead)
s.RegistCompressHandler(ServerURL+"/head/", doGetHead)
s.RegistCompressHandler(ServerURL+"/update/", doUpdate)
s.RegistCompressHandler(ServerURL+"/recent/", doRecent)
s.RegistCompressHandler(ServerURL+"/", doMotd)
if relaynum > 0 {
s.HandleFunc(ServerURL+"/proxy/", doProxy)
s.HandleFunc(ServerURL+"/relay/", doRelay)
s.Handle(ServerURL+"/request_relay/", websocket.Handler(websocketRelay(relaynum)))
}
}
//doRelay relays url to websocket.
//e.g. accept http://relay.com:8000/server.cgi/relay/client.com:8000/server.cgi/join/other.com:8000+server.cgi
func doRelay(w http.ResponseWriter, r *http.Request) {
reg := regexp.MustCompile("^" + ServerURL + `/relay/(([^/]+)/[^/]*.*)`)
m := reg.FindStringSubmatch(r.URL.Path)
if m == nil || len(m) < 3 {
log.Println("invalid path", r.URL.Path)
return
}
backup := r.URL
var err error
r.URL, err = url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
host, port, err := net.SplitHostPort(m[2])
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
relay.HandleServer(host, w, r, func(res *relay.ResponseWriter) bool {
return true
})
r.URL = backup
}
//doProxy proxies the request,
//e.g. accept http://relay.com:8000/server.cgi/proxy/other.com:8000/server.cgi/join/client.com:8000+server.cgi
//and proxy to http://other.com:8000/server.cgi/join/client.com:8000+server.cgi
//path format of proxy url must be /*/*/[join|bye|update]/* not to be abused.
func doProxy(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
if !relay.IsAccepted(host) {
log.Println(host, "is not accepted")
return
}
reg := regexp.MustCompile("^" + ServerURL + "/proxy/(.*)$")
m := reg.FindStringSubmatch(r.URL.Path)
if len(m) < 2 {
log.Println("invalid path", r.URL.Path)
return
}
u, err := url.ParseRequestURI("http://" + m[1])
if err != nil {
log.Println(err)
return
}
if !validPath(u.Path) {
log.Println("invalid path", u.Path)
return
}
rp := &httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL = u
},
}
rp.ServeHTTP(w, r)
}
func validPath(path string) bool {
cmd := strings.Split(path, "/")
switch {
case len(cmd) > 2 && (cmd[2] == "bye" || cmd[2] == "join"):
case len(cmd) > 5 && cmd[2] == "update":
default:
return false
}
return true
}
//websocketRelay accepts websocket relay.
//e.g. accept url http://relay.com:8000/server.cgi/request_relay/
func websocketRelay(relaynum int) func(*websocket.Conn) {
return func(ws *websocket.Conn) {
if n := relay.Count(); int(n) >= relaynum {
log.Println("num of relays", n, "is over", relaynum)
return
}
host, port, err := net.SplitHostPort(ws.Request().RemoteAddr)
if err != nil {
log.Println(err)
return
}
p, err := strconv.Atoi(port)
if err != nil {
log.Println(err)
return
}
log.Println("websocket client:", host, port)
n, err := node.MakeNode(host, "/server.cgi", p)
if err != nil {
log.Println(err)
return
}
if !n.IsAllowed() {
log.Println(n, "is not allowed")
return
}
log.Println(host, "is accepted.")
relay.StartServe(host, ws)
}
}
//doPing just responds PONG with the remote addr.
func doPing(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Println(err)
return
}
fmt.Fprint(w, "PONG\n"+host+"\n")
}
//doNode returns one of nodelist. if nodelist.len=0 returns one of initNode.
func doNode(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
if s.NodeManager.ListLen() > 0 {
fmt.Fprintln(w, s.NodeManager.GetNodestrSliceInTable("")[0])
} else {
fmt.Fprintln(w, s.InitNode.GetData()[0])
}
}
//doJoin adds node specified in url to searchlist and nodelist.
//if nodelist length exceeds #defaultnode, removes one node from nodelist, says bye to it, and returns WELCOME with its ip:port.
func doJoin(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("join")
host = s.remoteIP(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
return
}
if _, err := n.Ping(); err != nil {
return
}
suggest := s.NodeManager.ReplaceNodeInList(n)
if suggest == nil {
fmt.Fprintln(s.wr, "WELCOME")
return
}
fmt.Fprintf(s.wr, "WELCOME\n%s\n", suggest.Nodestr)
}
//doBye removes from nodelist and says bye to the node specified in url.
func doBye(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
host, path, port := s.extractHost("bye")
host = s.checkRemote(host)
if host == "" {
return
}
n, err := node.MakeNode(host, path, port)
if err == nil {
s.NodeManager.RemoveFromList(n)
}
fmt.Fprintln(s.wr, "BYEBYE")
}
//doHave checks existence of the cache whose name is specified in url.
func doHave(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^have/([0-9A-Za-z_]+)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
fmt.Fprintln(w, "NO")
log.Println("illegal url")
return
}
ca := thread.NewCache(m[1])
if ca.HasRecord() {
fmt.Fprintln(w, "YES")
} else {
fmt.Fprintln(w, "NO")
}
}
//doUpdate adds remote node to searchlist and lookuptable with datfile specified in url.
//if stamp is in range of defaultUpdateRange adds to updateque.
func doUpdate(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
log.Println("failed to create cgi struct")
return
}
reg := regexp.MustCompile(`^update/(\w+)/(\d+)/(\w+)/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
datfile, stamp, id, hostport, path := m[1], m[2], m[3], m[4], m[5]
host, portstr, err := net.SplitHostPort(hostport)
if err != nil {
log.Println(err)
return
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return
}
host = s.remoteIP(host)
if host == "" {
log.Println("host is null")
return
}
n, err := node.MakeNode(host, path, port)
if err != nil || !n.IsAllowed() {
log.Println("detects spam")
return
}
s.NodeManager.AppendToTable(datfile, n)
s.NodeManager.Sync()
nstamp, err := strconv.ParseInt(stamp, 10, 64)
if err != nil {
log.Println(err)
return
}
if !thread.IsInUpdateRange(nstamp) {
return
}
rec := thread.NewRecord(datfile, stamp+"_"+id)
go s.UpdateQue.UpdateNodes(rec, n)
fmt.Fprintln(w, "OK")
}
//doRecent renders records whose timestamp is in range of one specified in url.
func doRecent(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^recent/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return
}
stamp := m[1]
last := time.Now().Unix() + s.RecentRange
begin, end, _ := s.parseStamp(stamp, last)
for _, i := range s.RecentList.GetRecords() {
if begin > i.Stamp || i.Stamp > end {
continue
}
ca := thread.NewCache(i.Datfile)
cont := fmt.Sprintf("%d<>%s<>%s", i.Stamp, i.ID, i.Datfile)
if ca.LenTags() > 0 {
cont += "<>tag:" + ca.TagString()
}
_, err := fmt.Fprintf(w, "%s\n", cont)
if err != nil {
log.Println(err)
}
}
}
//doMotd simply renders motd file.
func doMotd(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
f, err := ioutil.ReadFile(s.Motd)
if err != nil {
log.Println(err)
return
}
fmt.Fprintf(w, string(f))
}
//doGetHead renders record contents (get) or id+timestamp (head) for records that have the id and
// whose stamp is in the range specified by the url.
func doGetHead(w http.ResponseWriter, r *http.Request) {
s, err := newServerCGI(w, r)
if err != nil {
log.Println(err)
return
}
reg := regexp.MustCompile("^(get|head|removed)/([0-9A-Za-z_]+)/?([-0-9A-Za-z/]*)$")
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url", s.path())
return
}
method, datfile, stamp := m[1], m[2], m[3]
ca := thread.NewCache(datfile)
begin, end, id := s.parseStamp(stamp, math.MaxInt32)
var recs thread.RecordMap
if method == "removed" {
recs = ca.LoadRemovedRecords()
} else {
recs = ca.LoadRecords()
}
for _, r := range recs {
if r.InRange(begin, end, id) {
if method == "get" {
if err := r.Load(); err != nil {
log.Println(err)
continue
}
fmt.Fprintln(s.wr, r.Recstr())
continue
}
fmt.Fprintln(s.wr, strings.Replace(r.Idstr(), "_", "<>", -1))
}
}
if method == "get" {
thread.UpdatedRecord.Inform(datfile, id, begin, end)
}
}
//ServerCfg is config for serverCGI struct.
//must set beforehand.
var ServerCfg *ServerConfig
//ServerConfig is config for serverCGI struct.
type ServerConfig struct {
RecentRange int64
Motd string
NodeManager *node.Manager
InitNode *util.ConfList
UpdateQue *thread.UpdateQue
RecentList *thread.RecentList
}
//serverCGI is for server.cgi handler.
type serverCGI struct {
*ServerConfig
*cgi
}
//newServerCGI sets content-type to text and returns a serverCGI obj.
func newServerCGI(w http.ResponseWriter, r *http.Request) (serverCGI, error) {
if ServerCfg == nil {
log.Fatal("must set ServerCfg")
}
c := serverCGI{
ServerConfig: ServerCfg,
cgi: newCGI(w, r),
}
if c.cgi == nil {
return c, errors.New("cannot make CGI")
}
if w != nil {
w.Header().Set("Content-Type", "text/plain")
}
return c, nil
}
//remoteIP returns host if host!=""
//else returns remoteaddr
func (s *serverCGI) remoteIP(host string) string {
if host != "" {
return host
}
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if !isGlobal(remoteAddr) {
log.Println(remoteAddr, "is local IP")
return ""
}
return remoteAddr
}
func isGlobal(remoteAddr string) bool {
ip := net.ParseIP(remoteAddr)
if ip == nil {
log.Println(remoteAddr,"has illegal format")
return false
}
return nat.IsGlobalIP(ip) != ""
}
//checkRemote returns remoteaddr
//if host is specified returns remoteaddr if host==remoteaddr.
func (s *serverCGI) checkRemote(host string) string {
remoteAddr, _, err := net.SplitHostPort(s.req.RemoteAddr)
if err != nil {
log.Println(err)
return ""
}
if host == "" {
return remoteAddr
}
ipaddr, err := net.LookupIP(host)
if err != nil {
log.Println(err)
return ""
}
for _, ipa := range ipaddr {
if ipa.String() == remoteAddr {
return remoteAddr
}
}
return ""
}
//extractHost extracts and returns host, path, and port from /method/ip:port.
func (s *serverCGI) extractHost(method string) (string, string, int) {
reg := regexp.MustCompile("^" + method + `/([^\+]*)(\+.*)`)
m := reg.FindStringSubmatch(s.path())
if m == nil {
log.Println("illegal url")
return "", "", 0
}
path := m[2]
host, portstr, err := net.SplitHostPort(m[1])
if err != nil {
log.Println(err)
return "", "", 0
}
port, err := strconv.Atoi(portstr)
if err != nil {
log.Println(err)
return "", "", 0
}
return host, path, port
}
//parseStamp parses format beginStamp - endStamp/id and returns them.
//if endStamp is not specified returns last as endStamp.
func (s *serverCGI) parseStamp(stamp string, last int64) (int64, int64, string) {
buf := strings.Split(stamp, "/")
var id string
if len(buf) > 1 {
id = buf[1]
stamp = buf[0]
}
buf = strings.Split(stamp, "-")
nstamps := make([]int64, len(buf))
var err error
for i, nstamp := range buf {
if nstamp == "" {
continue
}
nstamps[i], err = strconv.ParseInt(nstamp, 10, 64)
if err != nil {
return 0, 0, ""
}
}
switch {
case stamp == "", stamp == "-":
return 0, last, id
case strings.HasSuffix(stamp, "-"):
return nstamps[0], last, id
case len(buf) == 1:
return nstamps[0], nstamps[0], id
case buf[0] == "":
return 0, nstamps[1], id
default:
return nstamps[0], nstamps[1], id
}
} | random_line_split |
|
image_convolution.py | import os
import PIL.Image
import time
from Tkinter import *
# =============================================Initialize Variables=============================================#
size = 256, 256 # Size of thumbnail image displayed
newValue = list((0, 0, 0))
convMask = 3
normalizer = 1
errorMessage = ""
previewBox = 0
convMatrix = [[0 for x in range(convMask)] for x in range(convMask)] # matrix used for 2D image convolution
newColor = list((0, 0, 0))
for x in range(0, convMask):
for y in range(0, convMask):
convMatrix[x][y] = 0
# cnt = cnt+1
convMatrix[1][1] = 1
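# Note (added for clarity, not part of the original script): the loop above zeroes the kernel and
# convMatrix[1][1] = 1 turns it into an identity kernel, so with normalizer = 1 the filtered image
# reproduces the input. A hypothetical 3x3 box blur would instead set every entry to 1 and the
# normalizer to 9, keeping the weighted sum computed in apply_matrix() within the 0-255 range.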
# ----------------------------------------------Load Images----------------------------------------------#
image = PIL.Image.open("bumbleKoda.png") # Open default image to memory
thumbnailImage = PIL.Image.open("bumbleKoda.png") # Open another copy of image, to be used as thumbnail
thumbnailImage.thumbnail(size, PIL.Image.ANTIALIAS) # Turn thumbnailImage into a image with max 'size' of size
# ----------------------------------------------Pre Process Images----------------------------------------------#
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if other
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = image.convert('RGB')
pixels = image.load() # Holds all pixel data as a 3 tuple in a 2D array
thumbnailPixels = thumbnailImage.load()
newPixels = pixels # To be used when processing, will hold new image while processing
imageWidth = image.size[0]
imageHeight = image.size[1]
# =============================================Initialize GUI=============================================#
root = Tk() # Initialize Tkinter for GUI
# ----------------------------------------------GUI Functions----------------------------------------------#
def image_load(): # loads the image and displays it on screen
global thumbnailImage
global pixels
global thumbnailPixels
global newPixels
global image
global imageWidth
global imageHeight
global size
global errorMessage
global previewBox
global newImage
filePath = path.get() # Retrieve file path from UI
start = time.clock() # timer (debug message)
if filePath == "":
errorMessage = "Error: Image path is blank"
update_error()
elif os.path.isfile(filePath) == FALSE:
errorMessage = "Error: File does not exist"
update_error()
else:
image = PIL.Image.open(filePath) # Open image to memory
newImage = image
thumbnailImage = PIL.Image.open(filePath) # Open another copy of image, to be used as thumbnail
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if grayscale/monotone
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = image.convert('RGB')
imageWidth = image.size[0]
imageHeight = image.size[1]
pixels = image.load() # 2D array containing all of the pixel data in image
thumbnailPixels = thumbnailImage.load() # 2D array containing all fo the pixel data in thumbnailImage
newPixels = newImage.load() # to be used in processing, holds new image while processing
thumbnailImage.thumbnail(size,
PIL.Image.ANTIALIAS) # Turn thumbnailImage into a image with max width and height of 'size'
thumbnailImage.save("tempThumbnail.gif") # image to be loaded to UI
photo = PhotoImage(file="tempThumbnail.gif") # load image to UI
display_image.configure(image=photo)
display_image.photo = photo
stop = time.clock() # timer (debug message)
print "Image loaded and displayed in %f seconds." % (stop - start) # debug message
errorMessage = "" # Clears error message on UI
update_error()
def apply_matrix(): # Need to properly set this up!
global pixels
global newPixels
global image
global imageHeight
global imageWidth
global newImage
global convMatrix
global convMask
global normalizer
global previewBox
if previewBox:
|
else:
imageStart = 2
imageStopWidth = imageWidth-2
imageStopHeight = imageHeight-2
start = time.clock() # timer (debug message)
for x in range(imageStart, imageStopWidth): # Image Rows, ignore outside pixels
print x,"/",(imageStopWidth)
for y in range(imageStart, imageStopHeight): # Image Columns, ignore outside pixels
newColor = list((0, 0, 0)) # clear newColor for next loop
for r in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- X values for convolution
for q in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- Y values for convolution
color = list(pixels[x + r, y + q]) # receive color of pixel being weighted and added
for i in range(0, 3): # for each R, G, and B
newValue[i] = color[i] * convMatrix[q + 1][r + 1] / normalizer
newColor[i] = newColor[i] + newValue[i] # sum all in r and q area
for j in range(0, 3): # clip R,G,B channels
if newColor[j] > 255:
newColor[j] = 255
elif newColor[j] < 0:
newColor[j] = 0
newPixels[x, y] = tuple(newColor) # convert back to tuple, store in new location
newImage.save("processedImage.png")
newImage.thumbnail(size, PIL.Image.ANTIALIAS) # processed image to be displayed to UI
newImage.save("processedImageThumbnail.gif")
newImage = PIL.Image.open("processedImage.png") #reload to avoid resize issues
update_image()
stop = time.clock() # timer (debug message)
print "Image processed in", (stop - start), "seconds." # debug message
def update_image(): # Updates image displayed on UI to most recently processed one
photo = PhotoImage(file="processedImageThumbnail.gif")
display_image.configure(image=photo)
display_image.photo = photo
def update_matrix(): # updates the normalizer and each value of the convolution matrix to what was entered by user
global normalizer
global convMatrix
convMatrix[0][0] = int(matrix_1_1.get())
convMatrix[0][1] = int(matrix_1_2.get())
convMatrix[0][2] = int(matrix_1_3.get())
convMatrix[1][0] = int(matrix_2_1.get())
convMatrix[1][1] = int(matrix_2_2.get())
convMatrix[1][2] = int(matrix_2_3.get())
convMatrix[2][0] = int(matrix_3_1.get())
convMatrix[2][1] = int(matrix_3_2.get())
convMatrix[2][2] = int(matrix_3_3.get())
normalizer = int(normalizer_entry.get())
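# --- Added note (illustrative only): the 3x3 grid of entries plus the normalizer read above define
# --- the filter applied by apply_matrix(). A few standard kernels that can be typed into the UI:
# ---   identity (no change):  0 0 0 / 0 1 0 / 0 0 0    with normalizer 1
# ---   box blur (average):    1 1 1 / 1 1 1 / 1 1 1    with normalizer 9
# ---   sharpen:               0 -1 0 / -1 5 -1 / 0 -1 0    with normalizer 1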
def update_error(): # updates the error message displayed on screen
global error_message
error_message.configure(text=errorMessage) # updates text displayed
def swap_checkbox_value():
global previewBox
if previewBox == 1:
previewBox = 0
else:
previewBox = 1
print previewBox
# ----------------------------------------------GUI Widgets----------------------------------------------#
# -------------------------Left Side Widgets-------------------------#
frame = Frame(root, bg="white") # base frame for other elements
frame.pack(side=LEFT)
quit_button = Button(frame, text="QUIT", command=frame.quit)
quit_button.pack(side=BOTTOM, fill=X)
apply_filter = Button(frame, text="Apply Matrix Filter", command=apply_matrix)
apply_filter.pack(side=TOP, fill=X)
preview_checkbox = Checkbutton(frame, text="Small Section Preview", command=swap_checkbox_value)
preview_checkbox.pack(side=TOP, fill=X)
load_image = Button(frame, text="Load Image", command=image_load)
load_image.pack(side=TOP, fill=X)
path = Entry(frame) # text entry field, for Load image
path.pack(side=TOP, fill=X)
photo = PhotoImage(file="blankThumbnail.gif")
display_image = Label(frame, image=photo)
display_image.photo = photo
display_image.pack(side=BOTTOM)
# -------------------------Right Side Widgets-------------------------#
frame_right = Frame(root) #main right frame
frame_right.pack(side=RIGHT)
frame_right_first = Frame(frame_right) #holds Update button and normalizer entry
frame_right_first.pack(side=TOP)
frame_right_second = Frame(frame_right) #holds first row of convolution matrix
frame_right_second.pack(side=TOP)
frame_right_third = Frame(frame_right) #holds second row of convolution matrix
frame_right_third.pack(side=TOP)
frame_right_fourth = Frame(frame_right) #holds third row of convolution matrix
frame_right_fourth.pack(side=TOP)
frame_right_fifth = Frame(frame_right) #holds error message
frame_right_fifth.pack(side=TOP)
update_matrix_button = Button(frame_right_first, text="Update Matrix", command=update_matrix)
update_matrix_button.pack(side=LEFT)
normalizer_entry = Entry(frame_right_first, width=2)
normalizer_entry.pack(side=LEFT)
matrix_1_1 = Entry(frame_right_second, width=2)
matrix_1_1.pack(side=LEFT)
matrix_1_2 = Entry(frame_right_second, width=2)
matrix_1_2.pack(side=LEFT)
matrix_1_3 = Entry(frame_right_second, width=2)
matrix_1_3.pack(side=LEFT)
matrix_2_1 = Entry(frame_right_third, width=2)
matrix_2_1.pack(side=LEFT)
matrix_2_2 = Entry(frame_right_third, width=2)
matrix_2_2.pack(side=LEFT)
matrix_2_3 = Entry(frame_right_third, width=2)
matrix_2_3.pack(side=LEFT)
matrix_3_1 = Entry(frame_right_fourth, width=2)
matrix_3_1.pack(side=LEFT)
matrix_3_2 = Entry(frame_right_fourth, width=2)
matrix_3_2.pack(side=LEFT)
matrix_3_3 = Entry(frame_right_fourth, width=2)
matrix_3_3.pack(side=LEFT)
error_message = Label(frame_right_fifth, relief=RIDGE, wraplength=150)
error_message.pack(side=LEFT)
# =============================================Run GUI=============================================#
root.mainloop() # main loop for Tkinter
root.destroy() # clears the window, fully ending task
if os.path.isfile("tempThumbnail.gif"): # clean up working directory of temp files
os.remove("tempThumbnail.gif")
if os.path.isfile("processedImageThumbnail.gif"):
os.remove("processedImageThumbnail.gif")
| imageStart = 2
imageStopWidth = 128
imageStopHeight = 128 | conditional_block |
image_convolution.py | import os
import PIL.Image
import time
from Tkinter import *
# =============================================Initialize Variables=============================================#
size = 256, 256 # Size of thumbnail image displayed
newValue = list((0, 0, 0))
convMask = 3
normalizer = 1
errorMessage = ""
previewBox = 0
convMatrix = [[0 for x in range(convMask)] for x in range(convMask)] # matrix used for 2D image convolution
newColor = list((0, 0, 0))
for x in range(0, convMask):
for y in range(0, convMask):
convMatrix[x][y] = 0
# cnt = cnt+1
convMatrix[1][1] = 1
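# With only the centre weight set to 1 (and normalizer 1), the starting kernel is the identity
# filter, so applying it should leave the image unchanged.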
# ----------------------------------------------Load Images----------------------------------------------#
image = PIL.Image.open("bumbleKoda.png") # Open default image to memory
thumbnailImage = PIL.Image.open("bumbleKoda.png") # Open another copy of image, to be used as thumbnail
thumbnailImage.thumbnail(size, PIL.Image.ANTIALIAS) # Turn thumbnailImage into an image with max width and height of 'size'
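# Note (added): PIL.Image.ANTIALIAS is the original PIL-era name; in current Pillow releases it has
# been removed in favour of the LANCZOS resampling constant. If this script were run against a newer
# Pillow (an assumption -- the original targets Python 2 / classic PIL), the equivalent call would be:
#   thumbnailImage.thumbnail(size, PIL.Image.LANCZOS)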
# ----------------------------------------------Pre Process Images----------------------------------------------#
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if other
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = thumbnailImage.convert('RGB')
pixels = image.load() # Holds all pixel data as a 3 tuple in a 2D array
thumbnailPixels = thumbnailImage.load()
newPixels = pixels # To be used when processing, will hold new image while processing
imageWidth = image.size[0]
imageHeight = image.size[1]
# =============================================Initialize GUI=============================================#
root = Tk() # Initialize Tkinter for GUI
# ----------------------------------------------GUI Functions----------------------------------------------#
def image_load(): # loads the image and displays it on screen
global thumbnailImage
global pixels
global thumbnailPixels
global newPixels
global image
global imageWidth
global imageHeight
global size
global errorMessage
global previewBox
global newImage
filePath = path.get() # Retrieve file path from UI
start = time.clock() # timer (debug message)
if filePath == "":
errorMessage = "Error: Image path is blank"
update_error()
elif not os.path.isfile(filePath):
errorMessage = "Error: File does not exist"
update_error()
else:
image = PIL.Image.open(filePath) # Open image to memory
newImage = image
thumbnailImage = PIL.Image.open(filePath) # Open another copy of image, to be used as thumbnail
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if grayscale/monotone
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = thumbnailImage.convert('RGB')
imageWidth = image.size[0]
imageHeight = image.size[1]
pixels = image.load() # 2D array containing all of the pixel data in image
thumbnailPixels = thumbnailImage.load() # 2D array containing all of the pixel data in thumbnailImage
newPixels = newImage.load() # to be used in processing, holds new image while processing
thumbnailImage.thumbnail(size,
PIL.Image.ANTIALIAS) # Turn thumbnailImage into an image with max width and height of 'size'
thumbnailImage.save("tempThumbnail.gif") # image to be loaded to UI
photo = PhotoImage(file="tempThumbnail.gif") # load image to UI
display_image.configure(image=photo)
display_image.photo = photo
stop = time.clock() # timer (debug message)
print "Image loaded and displayed in %f seconds." % (stop - start) # debug message
errorMessage = "" # Clears error message on UI
update_error()
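# Note (added): time.clock() is a Python 2-era timer that was removed in Python 3.8. A hypothetical
# Python 3 port of these debug timers would use time.perf_counter() instead, e.g.:
#   start = time.perf_counter()
#   ...
#   print("Image loaded and displayed in %f seconds." % (time.perf_counter() - start))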
def apply_matrix(): # Need to properly set this up!
global pixels
global newPixels
global image
global imageHeight
global imageWidth
global newImage
global convMatrix
global convMask
global normalizer
global previewBox
if previewBox:
imageStart = 2
imageStopWidth = 128
imageStopHeight = 128
else:
imageStart = 2
imageStopWidth = imageWidth-2
imageStopHeight = imageHeight-2
start = time.clock() # timer (debug message)
for x in range(imageStart, imageStopWidth): # Image Rows, ignore outside pixels
print x,"/",(imageStopWidth)
for y in range(imageStart, imageStopHeight): # Image Columns, ignore outside pixels
newColor = list((0, 0, 0)) # clear newColor for next loop
for r in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- X values for convolution
for q in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- Y values for convolution
color = list(pixels[x + r, y + q]) # receive color of pixel being weighted and added
for i in range(0, 3): # for each R, G, and B
newValue[i] = color[i] * convMatrix[q + 1][r + 1] / normalizer
newColor[i] = newColor[i] + newValue[i] # sum all in r and q area
for j in range(0, 3): # clip R,G,B channels
if newColor[j] > 255:
newColor[j] = 255
elif newColor[j] < 0:
newColor[j] = 0
newPixels[x, y] = tuple(newColor) # convert back to tuple, store in new location
newImage.save("processedImage.png")
newImage.thumbnail(size, PIL.Image.ANTIALIAS) # processed image to be displayed to UI
newImage.save("processedImageThumbnail.gif")
newImage = PIL.Image.open("processedImage.png") #reload to avoid resize issues
update_image()
stop = time.clock() # timer (debug message)
print "Image processed in", (stop - start), "seconds." # debug message
def update_image(): # Updates image displayed on UI to most recently processed one
photo = PhotoImage(file="processedImageThumbnail.gif")
display_image.configure(image=photo)
display_image.photo = photo
def | (): # updates the normalizer and each value of the convolution matrix to what was entered by user
global normalizer
global convMatrix
convMatrix[0][0] = int(matrix_1_1.get())
convMatrix[0][1] = int(matrix_1_2.get())
convMatrix[0][2] = int(matrix_1_3.get())
convMatrix[1][0] = int(matrix_2_1.get())
convMatrix[1][1] = int(matrix_2_2.get())
convMatrix[1][2] = int(matrix_2_3.get())
convMatrix[2][0] = int(matrix_3_1.get())
convMatrix[2][1] = int(matrix_3_2.get())
convMatrix[2][2] = int(matrix_3_3.get())
normalizer = int(normalizer_entry.get())
def update_error(): # updates the error message displayed on screen
global error_message
error_message.configure(text=errorMessage) # updates text displayed
def swap_checkbox_value():
global previewBox
if previewBox == 1:
previewBox = 0
else:
previewBox = 1
print previewBox
# ----------------------------------------------GUI Widgets----------------------------------------------#
# -------------------------Left Side Widgets-------------------------#
frame = Frame(root, bg="white") # base frame for other elements
frame.pack(side=LEFT)
quit_button = Button(frame, text="QUIT", command=frame.quit)
quit_button.pack(side=BOTTOM, fill=X)
apply_filter = Button(frame, text="Apply Matrix Filter", command=apply_matrix)
apply_filter.pack(side=TOP, fill=X)
preview_checkbox = Checkbutton(frame, text="Small Section Preview", command=swap_checkbox_value)
preview_checkbox.pack(side=TOP, fill=X)
load_image = Button(frame, text="Load Image", command=image_load)
load_image.pack(side=TOP, fill=X)
path = Entry(frame) # text entry field, for Load image
path.pack(side=TOP, fill=X)
photo = PhotoImage(file="blankThumbnail.gif")
display_image = Label(frame, image=photo)
display_image.photo = photo
display_image.pack(side=BOTTOM)
# -------------------------Right Side Widgets-------------------------#
frame_right = Frame(root) #main right frame
frame_right.pack(side=RIGHT)
frame_right_first = Frame(frame_right) #holds Update button and normalizer entry
frame_right_first.pack(side=TOP)
frame_right_second = Frame(frame_right) #holds first row of convolution matrix
frame_right_second.pack(side=TOP)
frame_right_third = Frame(frame_right) #holds second row of convolution matrix
frame_right_third.pack(side=TOP)
frame_right_fourth = Frame(frame_right) #holds third row of convolution matrix
frame_right_fourth.pack(side=TOP)
frame_right_fifth = Frame(frame_right) #holds error message
frame_right_fifth.pack(side=TOP)
update_matrix_button = Button(frame_right_first, text="Update Matrix", command=update_matrix)
update_matrix_button.pack(side=LEFT)
normalizer_entry = Entry(frame_right_first, width=2)
normalizer_entry.pack(side=LEFT)
matrix_1_1 = Entry(frame_right_second, width=2)
matrix_1_1.pack(side=LEFT)
matrix_1_2 = Entry(frame_right_second, width=2)
matrix_1_2.pack(side=LEFT)
matrix_1_3 = Entry(frame_right_second, width=2)
matrix_1_3.pack(side=LEFT)
matrix_2_1 = Entry(frame_right_third, width=2)
matrix_2_1.pack(side=LEFT)
matrix_2_2 = Entry(frame_right_third, width=2)
matrix_2_2.pack(side=LEFT)
matrix_2_3 = Entry(frame_right_third, width=2)
matrix_2_3.pack(side=LEFT)
matrix_3_1 = Entry(frame_right_fourth, width=2)
matrix_3_1.pack(side=LEFT)
matrix_3_2 = Entry(frame_right_fourth, width=2)
matrix_3_2.pack(side=LEFT)
matrix_3_3 = Entry(frame_right_fourth, width=2)
matrix_3_3.pack(side=LEFT)
error_message = Label(frame_right_fifth, relief=RIDGE, wraplength=150)
error_message.pack(side=LEFT)
# =============================================Run GUI=============================================#
root.mainloop() # main loop for Tkinter
root.destroy() # clears the window, fully ending task
if os.path.isfile("tempThumbnail.gif"): # clean up working directory of temp files
os.remove("tempThumbnail.gif")
if os.path.isfile("processedImageThumbnail.gif"):
os.remove("processedImageThumbnail.gif")
| update_matrix | identifier_name |
image_convolution.py | import os
import PIL.Image
import time
from Tkinter import *
# =============================================Initialize Variables=============================================#
size = 256, 256 # Size of thumbnail image displayed
newValue = list((0, 0, 0))
convMask = 3
normalizer = 1
errorMessage = ""
previewBox = 0
convMatrix = [[0 for x in range(convMask)] for x in range(convMask)] # matrix used for 2D image convolution
newColor = list((0, 0, 0))
for x in range(0, convMask):
for y in range(0, convMask):
convMatrix[x][y] = 0
# cnt = cnt+1
convMatrix[1][1] = 1
# ----------------------------------------------Load Images----------------------------------------------#
image = PIL.Image.open("bumbleKoda.png") # Open default image to memory
thumbnailImage = PIL.Image.open("bumbleKoda.png") # Open another copy of image, to be used as thumbnail
thumbnailImage.thumbnail(size, PIL.Image.ANTIALIAS) # Turn thumbnailImage into an image with max width and height of 'size'
# ----------------------------------------------Pre Process Images----------------------------------------------#
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if other
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = thumbnailImage.convert('RGB')
pixels = image.load() # Holds all pixel data as a 3 tuple in a 2D array
thumbnailPixels = thumbnailImage.load()
newPixels = pixels # To be used when processing, will hold new image while processing
imageWidth = image.size[0]
imageHeight = image.size[1]
# =============================================Initialize GUI=============================================#
root = Tk() # Initialize Tkinter for GUI
# ----------------------------------------------GUI Functions----------------------------------------------#
def image_load(): # loads the image and displays it on screen
global thumbnailImage
global pixels
global thumbnailPixels
global newPixels
global image
global imageWidth
global imageHeight
global size
global errorMessage
global previewBox
global newImage
filePath = path.get() # Retrieve file path from UI
start = time.clock() # timer (debug message)
if filePath == "":
errorMessage = "Error: Image path is blank"
update_error()
elif not os.path.isfile(filePath):
errorMessage = "Error: File does not exist"
update_error()
else:
image = PIL.Image.open(filePath) # Open image to memory
newImage = image
thumbnailImage = PIL.Image.open(filePath) # Open another copy of image, to be used as thumbnail
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if grayscale/monotone
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = thumbnailImage.convert('RGB')
imageWidth = image.size[0]
imageHeight = image.size[1]
pixels = image.load() # 2D array containing all of the pixel data in image
thumbnailPixels = thumbnailImage.load() # 2D array containing all of the pixel data in thumbnailImage
newPixels = newImage.load() # to be used in processing, holds new image while processing
thumbnailImage.thumbnail(size,
PIL.Image.ANTIALIAS) # Turn thumbnailImage into an image with max width and height of 'size'
thumbnailImage.save("tempThumbnail.gif") # image to be loaded to UI
photo = PhotoImage(file="tempThumbnail.gif") # load image to UI
display_image.configure(image=photo)
display_image.photo = photo
stop = time.clock() # timer (debug message)
print "Image loaded and displayed in %f seconds." % (stop - start) # debug message
errorMessage = "" # Clears error message on UI
update_error()
def apply_matrix(): # Need to properly set this up!
global pixels
global newPixels
global image
global imageHeight
global imageWidth
global newImage
global convMatrix
global convMask
global normalizer
global previewBox
if previewBox:
imageStart = 2
imageStopWidth = 128
imageStopHeight = 128
else:
imageStart = 2
imageStopWidth = imageWidth-2
imageStopHeight = imageHeight-2
start = time.clock() # timer (debug message)
for x in range(imageStart, imageStopWidth): # Image Rows, ignore outside pixels
print x,"/",(imageStopWidth)
for y in range(imageStart, imageStopHeight): # Image Columns, ignore outside pixels
newColor = list((0, 0, 0)) # clear newColor for next loop
for r in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- X values for convolution
for q in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- Y values for convolution
color = list(pixels[x + r, y + q]) # receive color of pixel being weighted and added
for i in range(0, 3): # for each R, G, and B
newValue[i] = color[i] * convMatrix[q + 1][r + 1] / normalizer
newColor[i] = newColor[i] + newValue[i] # sum all in r and q area
for j in range(0, 3): # clip R,G,B channels
if newColor[j] > 255:
newColor[j] = 255
elif newColor[j] < 0:
newColor[j] = 0
newPixels[x, y] = tuple(newColor) # convert back to tuple, store in new location
newImage.save("processedImage.png")
newImage.thumbnail(size, PIL.Image.ANTIALIAS) # processed image to be displayed to UI
newImage.save("processedImageThumbnail.gif")
newImage = PIL.Image.open("processedImage.png") #reload to avoid resize issues
update_image()
stop = time.clock() # timer (debug message)
print "Image processed in", (stop - start), "seconds." # debug message
def update_image(): # Updates image displayed on UI to most recently processed one
photo = PhotoImage(file="processedImageThumbnail.gif")
display_image.configure(image=photo)
display_image.photo = photo
def update_matrix(): # updates the normalizer and each value of the convolution matrix to what was entered by user
global normalizer
global convMatrix
convMatrix[0][0] = int(matrix_1_1.get())
convMatrix[0][1] = int(matrix_1_2.get())
convMatrix[0][2] = int(matrix_1_3.get())
convMatrix[1][0] = int(matrix_2_1.get())
convMatrix[1][1] = int(matrix_2_2.get())
convMatrix[1][2] = int(matrix_2_3.get())
convMatrix[2][0] = int(matrix_3_1.get())
convMatrix[2][1] = int(matrix_3_2.get())
convMatrix[2][2] = int(matrix_3_3.get())
normalizer = int(normalizer_entry.get())
def update_error(): # updates the error message displayed on screen
global error_message
error_message.configure(text=errorMessage) # updates text displayed
def swap_checkbox_value():
global previewBox
if previewBox == 1:
previewBox = 0
else:
previewBox = 1
print previewBox
# ----------------------------------------------GUI Widgets----------------------------------------------#
# -------------------------Left Side Widgets-------------------------#
frame = Frame(root, bg="white") # base frame for other elements
frame.pack(side=LEFT) | apply_filter.pack(side=TOP, fill=X)
preview_checkbox = Checkbutton(frame, text="Small Section Preview", command=swap_checkbox_value)
preview_checkbox.pack(side=TOP, fill=X)
load_image = Button(frame, text="Load Image", command=image_load)
load_image.pack(side=TOP, fill=X)
path = Entry(frame) # text entry field, for Load image
path.pack(side=TOP, fill=X)
photo = PhotoImage(file="blankThumbnail.gif")
display_image = Label(frame, image=photo)
display_image.photo = photo
display_image.pack(side=BOTTOM)
# -------------------------Right Side Widgets-------------------------#
frame_right = Frame(root) #main right frame
frame_right.pack(side=RIGHT)
frame_right_first = Frame(frame_right) #holds Update button and normalizer entry
frame_right_first.pack(side=TOP)
frame_right_second = Frame(frame_right) #holds first row of convolution matrix
frame_right_second.pack(side=TOP)
frame_right_third = Frame(frame_right) #holds second row of convolution matrix
frame_right_third.pack(side=TOP)
frame_right_fourth = Frame(frame_right) #holds third row of convolution matrix
frame_right_fourth.pack(side=TOP)
frame_right_fifth = Frame(frame_right) #holds error message
frame_right_fifth.pack(side=TOP)
update_matrix_button = Button(frame_right_first, text="Update Matrix", command=update_matrix)
update_matrix_button.pack(side=LEFT)
normalizer_entry = Entry(frame_right_first, width=2)
normalizer_entry.pack(side=LEFT)
matrix_1_1 = Entry(frame_right_second, width=2)
matrix_1_1.pack(side=LEFT)
matrix_1_2 = Entry(frame_right_second, width=2)
matrix_1_2.pack(side=LEFT)
matrix_1_3 = Entry(frame_right_second, width=2)
matrix_1_3.pack(side=LEFT)
matrix_2_1 = Entry(frame_right_third, width=2)
matrix_2_1.pack(side=LEFT)
matrix_2_2 = Entry(frame_right_third, width=2)
matrix_2_2.pack(side=LEFT)
matrix_2_3 = Entry(frame_right_third, width=2)
matrix_2_3.pack(side=LEFT)
matrix_3_1 = Entry(frame_right_fourth, width=2)
matrix_3_1.pack(side=LEFT)
matrix_3_2 = Entry(frame_right_fourth, width=2)
matrix_3_2.pack(side=LEFT)
matrix_3_3 = Entry(frame_right_fourth, width=2)
matrix_3_3.pack(side=LEFT)
error_message = Label(frame_right_fifth, relief=RIDGE, wraplength=150)
error_message.pack(side=LEFT)
# =============================================Run GUI=============================================#
root.mainloop() # main loop for Tkinter
root.destroy() # clears the window, fully ending task
if os.path.isfile("tempThumbnail.gif"): # clean up working directory of temp files
os.remove("tempThumbnail.gif")
if os.path.isfile("processedImageThumbnail.gif"):
os.remove("processedImageThumbnail.gif") |
quit_button = Button(frame, text="QUIT", command=frame.quit)
quit_button.pack(side=BOTTOM, fill=X)
apply_filter = Button(frame, text="Apply Matrix Filter", command=apply_matrix) | random_line_split |
image_convolution.py | import os
import PIL.Image
import time
from Tkinter import *
# =============================================Initialize Variables=============================================#
size = 256, 256 # Size of thumbnail image displayed
newValue = list((0, 0, 0))
convMask = 3
normalizer = 1
errorMessage = ""
previewBox = 0
convMatrix = [[0 for x in range(convMask)] for x in range(convMask)] # matrix used for 2D image convolution
newColor = list((0, 0, 0))
for x in range(0, convMask):
for y in range(0, convMask):
convMatrix[x][y] = 0
# cnt = cnt+1
convMatrix[1][1] = 1
# ----------------------------------------------Load Images----------------------------------------------#
image = PIL.Image.open("bumbleKoda.png") # Open default image to memory
thumbnailImage = PIL.Image.open("bumbleKoda.png") # Open another copy of image, to be used as thumbnail
thumbnailImage.thumbnail(size, PIL.Image.ANTIALIAS) # Turn thumbnailImage into an image with max width and height of 'size'
# ----------------------------------------------Pre Process Images----------------------------------------------#
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if other
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = thumbnailImage.convert('RGB')
pixels = image.load() # Holds all pixel data as a 3 tuple in a 2D array
thumbnailPixels = thumbnailImage.load()
newPixels = pixels # To be used when processing, will hold new image while processing
imageWidth = image.size[0]
imageHeight = image.size[1]
# =============================================Initialize GUI=============================================#
root = Tk() # Initialize Tkinter for GUI
# ----------------------------------------------GUI Functions----------------------------------------------#
def image_load(): # loads the image and displays it on screen
global thumbnailImage
global pixels
global thumbnailPixels
global newPixels
global image
global imageWidth
global imageHeight
global size
global errorMessage
global previewBox
global newImage
filePath = path.get() # Retrieve file path from UI
start = time.clock() # timer (debug message)
if filePath == "":
errorMessage = "Error: Image path is blank"
update_error()
elif not os.path.isfile(filePath):
errorMessage = "Error: File does not exist"
update_error()
else:
image = PIL.Image.open(filePath) # Open image to memory
newImage = image
thumbnailImage = PIL.Image.open(filePath) # Open another copy of image, to be used as thumbnail
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if grayscale/monotone
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = thumbnailImage.convert('RGB')
imageWidth = image.size[0]
imageHeight = image.size[1]
pixels = image.load() # 2D array containing all of the pixel data in image
thumbnailPixels = thumbnailImage.load() # 2D array containing all of the pixel data in thumbnailImage
newPixels = newImage.load() # to be used in processing, holds new image while processing
thumbnailImage.thumbnail(size,
PIL.Image.ANTIALIAS) # Turn thumbnailImage into an image with max width and height of 'size'
thumbnailImage.save("tempThumbnail.gif") # image to be loaded to UI
photo = PhotoImage(file="tempThumbnail.gif") # load image to UI
display_image.configure(image=photo)
display_image.photo = photo
stop = time.clock() # timer (debug message)
print "Image loaded and displayed in %f seconds." % (stop - start) # debug message
errorMessage = "" # Clears error message on UI
update_error()
def apply_matrix(): # Need to properly set this up!
global pixels
global newPixels
global image
global imageHeight
global imageWidth
global newImage
global convMatrix
global convMask
global normalizer
global previewBox
if previewBox:
imageStart = 2
imageStopWidth = 128
imageStopHeight = 128
else:
imageStart = 2
imageStopWidth = imageWidth-2
imageStopHeight = imageHeight-2
start = time.clock() # timer (debug message)
for x in range(imageStart, imageStopWidth): # Image Rows, ignore outside pixels
print x,"/",(imageStopWidth)
for y in range(imageStart, imageStopHeight): # Image Columns, ignore outside pixels
newColor = list((0, 0, 0)) # clear newColor for next loop
for r in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- X values for convolution
for q in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- Y values for convolution
color = list(pixels[x + r, y + q]) # receive color of pixel being weighted and added
for i in range(0, 3): # for each R, G, and B
newValue[i] = color[i] * convMatrix[q + 1][r + 1] / normalizer
newColor[i] = newColor[i] + newValue[i] # sum all in r and q area
for j in range(0, 3): # clip R,G,B channels
if newColor[j] > 255:
newColor[j] = 255
elif newColor[j] < 0:
newColor[j] = 0
newPixels[x, y] = tuple(newColor) # convert back to tuple, store in new location
newImage.save("processedImage.png")
newImage.thumbnail(size, PIL.Image.ANTIALIAS) # processed image to be displayed to UI
newImage.save("processedImageThumbnail.gif")
newImage = PIL.Image.open("processedImage.png") #reload to avoid resize issues
update_image()
stop = time.clock() # timer (debug message)
print "Image processed in", (stop - start), "seconds." # debug message
def update_image(): # Updates image displayed on UI to most recently processed one
|
def update_matrix(): # updates the normalizer and each value of the convolution matrix to what was entered by user
global normalizer
global convMatrix
convMatrix[0][0] = int(matrix_1_1.get())
convMatrix[0][1] = int(matrix_1_2.get())
convMatrix[0][2] = int(matrix_1_3.get())
convMatrix[1][0] = int(matrix_2_1.get())
convMatrix[1][1] = int(matrix_2_2.get())
convMatrix[1][2] = int(matrix_2_3.get())
convMatrix[2][0] = int(matrix_3_1.get())
convMatrix[2][1] = int(matrix_3_2.get())
convMatrix[2][2] = int(matrix_3_3.get())
normalizer = int(normalizer_entry.get())
def update_error(): # updates the error message displayed on screen
global error_message
error_message.configure(text=errorMessage) # updates text displayed
def swap_checkbox_value():
global previewBox
if previewBox == 1:
previewBox = 0
else:
previewBox = 1
print previewBox
# ----------------------------------------------GUI Widgets----------------------------------------------#
# -------------------------Left Side Widgets-------------------------#
frame = Frame(root, bg="white") # base frame for other elements
frame.pack(side=LEFT)
quit_button = Button(frame, text="QUIT", command=frame.quit)
quit_button.pack(side=BOTTOM, fill=X)
apply_filter = Button(frame, text="Apply Matrix Filter", command=apply_matrix)
apply_filter.pack(side=TOP, fill=X)
preview_checkbox = Checkbutton(frame, text="Small Section Preview", command=swap_checkbox_value)
preview_checkbox.pack(side=TOP, fill=X)
load_image = Button(frame, text="Load Image", command=image_load)
load_image.pack(side=TOP, fill=X)
path = Entry(frame) # text entry field, for Load image
path.pack(side=TOP, fill=X)
photo = PhotoImage(file="blankThumbnail.gif")
display_image = Label(frame, image=photo)
display_image.photo = photo
display_image.pack(side=BOTTOM)
# -------------------------Right Side Widgets-------------------------#
frame_right = Frame(root) #main right frame
frame_right.pack(side=RIGHT)
frame_right_first = Frame(frame_right) #holds Update button and normalizer entry
frame_right_first.pack(side=TOP)
frame_right_second = Frame(frame_right) #holds first row of convolution matrix
frame_right_second.pack(side=TOP)
frame_right_third = Frame(frame_right) #holds second row of convolution matrix
frame_right_third.pack(side=TOP)
frame_right_fourth = Frame(frame_right) #holds third row of convolution matrix
frame_right_fourth.pack(side=TOP)
frame_right_fifth = Frame(frame_right) #holds error message
frame_right_fifth.pack(side=TOP)
update_matrix_button = Button(frame_right_first, text="Update Matrix", command=update_matrix)
update_matrix_button.pack(side=LEFT)
normalizer_entry = Entry(frame_right_first, width=2)
normalizer_entry.pack(side=LEFT)
matrix_1_1 = Entry(frame_right_second, width=2)
matrix_1_1.pack(side=LEFT)
matrix_1_2 = Entry(frame_right_second, width=2)
matrix_1_2.pack(side=LEFT)
matrix_1_3 = Entry(frame_right_second, width=2)
matrix_1_3.pack(side=LEFT)
matrix_2_1 = Entry(frame_right_third, width=2)
matrix_2_1.pack(side=LEFT)
matrix_2_2 = Entry(frame_right_third, width=2)
matrix_2_2.pack(side=LEFT)
matrix_2_3 = Entry(frame_right_third, width=2)
matrix_2_3.pack(side=LEFT)
matrix_3_1 = Entry(frame_right_fourth, width=2)
matrix_3_1.pack(side=LEFT)
matrix_3_2 = Entry(frame_right_fourth, width=2)
matrix_3_2.pack(side=LEFT)
matrix_3_3 = Entry(frame_right_fourth, width=2)
matrix_3_3.pack(side=LEFT)
error_message = Label(frame_right_fifth, relief=RIDGE, wraplength=150)
error_message.pack(side=LEFT)
# =============================================Run GUI=============================================#
root.mainloop() # main loop for Tkinter
root.destroy() # clears the window, fully ending task
if os.path.isfile("tempThumbnail.gif"): # clean up working directory of temp files
os.remove("tempThumbnail.gif")
if os.path.isfile("processedImageThumbnail.gif"):
os.remove("processedImageThumbnail.gif")
| photo = PhotoImage(file="processedImageThumbnail.gif")
display_image.configure(image=photo)
display_image.photo = photo | identifier_body |
models.ts | /*
* Copyright (c) 2019 LabKey Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Ajax, Filter, Utils } from '@labkey/api';
import { List, Map, Record } from 'immutable';
import { Option } from 'react-select';
import { getEditorModel } from '../../global';
import { insertRows } from '../../query/api';
import { gridShowError } from '../../actions';
import { SCHEMAS } from '../base/models/schemas';
import { QueryColumn, QueryGridModel, QueryInfo, SchemaQuery } from '../base/models/model';
import { generateId } from '../../util/utils';
import { buildURL } from '../../url/ActionURL';
export interface SampleInputProps {
role: string
rowId: number
}
export interface IDerivePayload {
dataInputs?: Array<SampleInputProps>
materialDefault?: any
materialInputs?: Array<SampleInputProps>
materialOutputCount?: number
materialOutputs?: Array<{[key: string]: any}>
targetSampleSet: string
}
export interface IParentOption extends Option {
query?: string
schema?: string
}
export interface DisplayObject {
displayValue: any,
value: any
}
export class SampleSetParentType extends Record({
index: undefined,
key: undefined,
query: undefined,
schema: undefined,
value: undefined,
}) {
index: number;
key: string;
query: string;
schema: string;
value: List<DisplayObject>;
constructor(values?: any) {
super(values);
}
static create(values: any) {
if (!values.key)
values.key = generateId('parent-type-');
return new SampleSetParentType(values);
}
}
export interface ISampleSetOption extends Option {
lsid: string
rowId: number
}
export class SampleSetOption implements ISampleSetOption {
label: string;
lsid: string;
rowId: number;
value: any;
constructor(props?: Partial<SampleSetOption>) {
if (props) {
for (let k in props) {
this[k] = props[k];
}
}
}
}
interface MaterialOutput {
created: any
createdBy: string
id: number
lsid: string
modified: any
modifiedBy: string
name: string
properties: any
sampleSet: any
}
export class GenerateSampleResponse extends Record( {
data: undefined,
message: undefined,
success: false
}) {
data: {
materialOutputs: Array<MaterialOutput>
[key: string]: any
};
message: string;
success: boolean;
constructor(values?: any) {
super(values);
}
// Get all of the rowIds of the newly generated sampleIds (or the runs)
getFilter(): Filter.IFilter {
let filterColumn: string,
filterValue;
// data.id is the run rowId. If provided, create a filter based off the run instead of sampleIds.
if (this.data.id) {
filterColumn = 'Run/RowId';
filterValue = [this.data.id];
} else {
filterColumn = 'RowId';
// if a run id was not included, filter based on generated sample Ids.
filterValue = this.data.materialOutputs.map(val => val.id);
}
return Filter.create(filterColumn, filterValue, Filter.Types.IN);
}
}
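// Added illustration (assumed usage, not part of this module): the filter built by getFilter() is
// meant to be applied to a query over the created samples -- either by run rowId or by the generated
// sample rowIds. A minimal sketch:
//
//   const response = new GenerateSampleResponse(serverJson);
//   const filter = response.getFilter(); // Run/RowId IN (...) or RowId IN (...)
//   // pass [filter] as a base filter when querying the target sample set's rows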
export class SampleIdCreationModel extends Record({
errors: undefined,
initialSampleSet: undefined,
isError: false,
isInit: false,
parents: Array<string>(),
parentOptions: List<IParentOption>(),
sampleParents: List<SampleSetParentType>(),
sampleSetData: Map<string, any>(),
sampleSetOptions: List<ISampleSetOption>(),
selectionKey: undefined,
targetSampleSet: undefined,
sampleCount: 0
}) {
errors: Array<any>;
initialSampleSet: any;
isError: boolean;
isInit: boolean;
parents: Array<string>; // TODO should be 'originalParents'
parentOptions: List<IParentOption>;
sampleParents: List<SampleSetParentType>;
sampleSetData: Map<string, any>;
sampleSetOptions: List<ISampleSetOption>;
selectionKey: string;
targetSampleSet: SampleSetOption;
sampleCount: number;
constructor(values?: any) {
super(values);
}
hasTargetSampleSet() : boolean {
return !!(this.targetSampleSet && this.targetSampleSet.value);
}
getTargetSampleSetName() : string {
return this.hasTargetSampleSet() ? this.targetSampleSet.value : undefined;
}
getSampleInputs(): {
dataInputs: Array<SampleInputProps>,
materialInputs: Array<SampleInputProps>
} {
let dataInputs: Array<SampleInputProps> = [],
materialInputs: Array<SampleInputProps> = [];
this.sampleParents.forEach((parent, index) => {
if (parent.value) {
const isData = parent.schema === SCHEMAS.DATA_CLASSES.SCHEMA;
const isSample = parent.schema === SCHEMAS.SAMPLE_SETS.SCHEMA;
if (isData || isSample) {
const role = isData ? 'data' : 'sample';
parent.value.forEach((option) => {
const rowId = parseInt(option.value);
if (!isNaN(rowId)) {
const input = {role, rowId};
if (isData) {
dataInputs.push(input);
}
else {
materialInputs.push(input);
}
}
else {
console.warn('SampleSet/actions/getSampleInputs -- Unable to parse rowId from "' + option.value + '" for ' + role + '.');
}
});
}
}
});
return {
dataInputs,
materialInputs
}
}
getSaveValues(): IDerivePayload {
const { dataInputs, materialInputs } = this.getSampleInputs();
let materialDefault = {};
return {
dataInputs,
materialDefault,
materialInputs,
targetSampleSet: this.targetSampleSet.lsid
};
}
getParentOptions(currentSelection: string): Array<any> {
// exclude options that have already been selected, except the current selection for this input
return this.parentOptions
.filter(o => (
this.sampleParents.every(parent => {
const notParentMatch = !parent.query || !Utils.caseInsensitiveEquals(parent.query, o.value);
const matchesCurrent = currentSelection && Utils.caseInsensitiveEquals(currentSelection, o.value);
return notParentMatch || matchesCurrent;
})
))
.toArray();
}
// Make the call to the Derive API
deriveSamples(materialOutputCount: number): Promise<GenerateSampleResponse> {
const { dataInputs, materialInputs, materialOutputs, materialDefault, targetSampleSet } = this.getSaveValues();
return new Promise((resolve, reject) => {
Ajax.request({
url: buildURL('experiment', 'derive.api'),
jsonData: {
dataInputs,
materialInputs,
targetSampleSet,
materialOutputCount,
materialOutputs,
materialDefault
},
success: Utils.getCallbackWrapper((response) => {
resolve(new GenerateSampleResponse(response));
}),
failure: Utils.getCallbackWrapper((error) => {
reject(error);
})
});
});
}
getSchemaQuery() {
const sampleSetName = this.getTargetSampleSetName();
return sampleSetName ? SchemaQuery.create(SCHEMAS.SAMPLE_SETS.SCHEMA, sampleSetName) : undefined;
}
postSampleGrid(queryGridModel: QueryGridModel) : Promise<any> {
const editorModel = getEditorModel(queryGridModel.getId());
if (!editorModel) {
gridShowError(queryGridModel, {
message: 'Grid does not expose an editor. Ensure the grid is properly initialized for editing.'
});
return;
}
const rows = editorModel.getRawData(queryGridModel).valueSeq()
.reduce((rows, row) => rows.push(row.toMap()), List<Map<string, any>>());
// TODO: InsertRows responses are fragile and depend heavily on shape of data uploaded
return insertRows({
fillEmptyFields: true,
schemaQuery : this.getSchemaQuery(),
rows
})
};
static revertParentInputSchema(inputColumn: QueryColumn): SchemaQuery {
if (inputColumn.isExpInput()) {
const fieldKey = inputColumn.fieldKey.toLowerCase().split('/');
if (fieldKey.length === 2) {
let schemaName: string;
if (fieldKey[0] === QueryColumn.DATA_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.DATA_CLASSES.SCHEMA;
}
else if (fieldKey[0] === QueryColumn.MATERIAL_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.SAMPLE_SETS.SCHEMA;
}
else {
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey. "' + fieldKey[0] + '"'); | }
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey length.');
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn.');
}
getGridValues(queryInfo: QueryInfo): Map<any, any> {
let data = List<Map<string, any>>();
for (let i = 0; i < this.sampleCount; i++) {
let values = Map<string, any>();
queryInfo
.getInsertColumns()
.forEach((col) => {
const colName = col.name;
if (col.isExpInput()) {
// Convert parent values into appropriate column names
const sq = SampleIdCreationModel.revertParentInputSchema(col);
// should be only one parent with the matching schema and query name
const selected = this.sampleParents.find((parent) => parent.schema === sq.schemaName && parent.query === sq.queryName);
if (selected && selected.value) {
values = values.set(colName, selected.value);
}
}
});
data = data.push(values);
}
return data.toOrderedMap();
}
}
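// Added illustration (assumed usage, not part of this module): a typical derive flow with this model
// would be to build the parent inputs, call deriveSamples() with the desired output count, and then
// use the response filter to look up the new samples. A minimal sketch (getCurrentModel is a
// hypothetical accessor):
//
//   const model: SampleIdCreationModel = getCurrentModel();
//   model.deriveSamples(model.sampleCount)
//       .then((response) => {
//           const filter = response.getFilter();
//           // query SCHEMAS.SAMPLE_SETS with [filter] to show the newly derived samples
//       })
//       .catch((error) => console.error(error));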
export interface ISampleSetDetails {
isUpdate?: boolean
rowId?: number
name?: string
nameExpression?: string
description?: string
importAliasKeys?: Array<string>
importAliasValues?: Array<string>
}
export interface IParentAlias {
alias: string;
id: string; //generated by panel used for removal, not saved
parentValue: IParentOption;
} | }
return SchemaQuery.create(schemaName, fieldKey[1]); | random_line_split |
models.ts | /*
* Copyright (c) 2019 LabKey Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Ajax, Filter, Utils } from '@labkey/api';
import { List, Map, Record } from 'immutable';
import { Option } from 'react-select';
import { getEditorModel } from '../../global';
import { insertRows } from '../../query/api';
import { gridShowError } from '../../actions';
import { SCHEMAS } from '../base/models/schemas';
import { QueryColumn, QueryGridModel, QueryInfo, SchemaQuery } from '../base/models/model';
import { generateId } from '../../util/utils';
import { buildURL } from '../../url/ActionURL';
export interface SampleInputProps {
role: string
rowId: number
}
export interface IDerivePayload {
dataInputs?: Array<SampleInputProps>
materialDefault?: any
materialInputs?: Array<SampleInputProps>
materialOutputCount?: number
materialOutputs?: Array<{[key: string]: any}>
targetSampleSet: string
}
export interface IParentOption extends Option {
query?: string
schema?: string
}
export interface DisplayObject {
displayValue: any,
value: any
}
export class SampleSetParentType extends Record({
index: undefined,
key: undefined,
query: undefined,
schema: undefined,
value: undefined,
}) {
index: number;
key: string;
query: string;
schema: string;
value: List<DisplayObject>;
constructor(values?: any) {
super(values);
}
static create(values: any) {
if (!values.key)
values.key = generateId('parent-type-');
return new SampleSetParentType(values);
}
}
export interface ISampleSetOption extends Option {
lsid: string
rowId: number
}
export class SampleSetOption implements ISampleSetOption {
label: string;
lsid: string;
rowId: number;
value: any;
constructor(props?: Partial<SampleSetOption>) {
if (props) {
for (let k in props) {
this[k] = props[k];
}
}
}
}
interface MaterialOutput {
created: any
createdBy: string
id: number
lsid: string
modified: any
modifiedBy: string
name: string
properties: any
sampleSet: any
}
export class GenerateSampleResponse extends Record( {
data: undefined,
message: undefined,
success: false
}) {
data: {
materialOutputs: Array<MaterialOutput>
[key: string]: any
};
message: string;
success: boolean;
constructor(values?: any) {
super(values);
}
// Get all of the rowIds of the newly generated sampleIds (or the runs)
getFilter(): Filter.IFilter {
let filterColumn: string,
filterValue;
// data.id is the run rowId. If provided, create a filter based off the run instead of sampleIds.
if (this.data.id) {
filterColumn = 'Run/RowId';
filterValue = [this.data.id];
} else {
filterColumn = 'RowId';
// if a run id was not included, filter based on generated sample Ids.
filterValue = this.data.materialOutputs.map(val => val.id);
}
return Filter.create(filterColumn, filterValue, Filter.Types.IN);
}
}
export class SampleIdCreationModel extends Record({
errors: undefined,
initialSampleSet: undefined,
isError: false,
isInit: false,
parents: Array<string>(),
parentOptions: List<IParentOption>(),
sampleParents: List<SampleSetParentType>(),
sampleSetData: Map<string, any>(),
sampleSetOptions: List<ISampleSetOption>(),
selectionKey: undefined,
targetSampleSet: undefined,
sampleCount: 0
}) {
errors: Array<any>;
initialSampleSet: any;
isError: boolean;
isInit: boolean;
parents: Array<string>; // TODO should be 'originalParents'
parentOptions: List<IParentOption>;
sampleParents: List<SampleSetParentType>;
sampleSetData: Map<string, any>;
sampleSetOptions: List<ISampleSetOption>;
selectionKey: string;
targetSampleSet: SampleSetOption;
sampleCount: number;
constructor(values?: any) {
super(values);
}
hasTargetSampleSet() : boolean {
return !!(this.targetSampleSet && this.targetSampleSet.value);
}
getTargetSampleSetName() : string {
return this.hasTargetSampleSet() ? this.targetSampleSet.value : undefined;
}
getSampleInputs(): {
dataInputs: Array<SampleInputProps>,
materialInputs: Array<SampleInputProps>
} {
let dataInputs: Array<SampleInputProps> = [],
materialInputs: Array<SampleInputProps> = [];
this.sampleParents.forEach((parent, index) => {
if (parent.value) {
const isData = parent.schema === SCHEMAS.DATA_CLASSES.SCHEMA;
const isSample = parent.schema === SCHEMAS.SAMPLE_SETS.SCHEMA;
if (isData || isSample) {
const role = isData ? 'data' : 'sample';
parent.value.forEach((option) => {
const rowId = parseInt(option.value);
if (!isNaN(rowId)) |
else {
console.warn('SampleSet/actions/getSampleInputs -- Unable to parse rowId from "' + option.value + '" for ' + role + '.');
}
});
}
}
});
return {
dataInputs,
materialInputs
}
}
getSaveValues(): IDerivePayload {
const { dataInputs, materialInputs } = this.getSampleInputs();
let materialDefault = {};
return {
dataInputs,
materialDefault,
materialInputs,
targetSampleSet: this.targetSampleSet.lsid
};
}
getParentOptions(currentSelection: string): Array<any> {
// exclude options that have already been selected, except the current selection for this input
return this.parentOptions
.filter(o => (
this.sampleParents.every(parent => {
const notParentMatch = !parent.query || !Utils.caseInsensitiveEquals(parent.query, o.value);
const matchesCurrent = currentSelection && Utils.caseInsensitiveEquals(currentSelection, o.value);
return notParentMatch || matchesCurrent;
})
))
.toArray();
}
// Make the call to the Derive API
deriveSamples(materialOutputCount: number): Promise<GenerateSampleResponse> {
const { dataInputs, materialInputs, materialOutputs, materialDefault, targetSampleSet } = this.getSaveValues();
return new Promise((resolve, reject) => {
Ajax.request({
url: buildURL('experiment', 'derive.api'),
jsonData: {
dataInputs,
materialInputs,
targetSampleSet,
materialOutputCount,
materialOutputs,
materialDefault
},
success: Utils.getCallbackWrapper((response) => {
resolve(new GenerateSampleResponse(response));
}),
failure: Utils.getCallbackWrapper((error) => {
reject(error);
})
});
});
}
getSchemaQuery() {
const sampleSetName = this.getTargetSampleSetName();
return sampleSetName ? SchemaQuery.create(SCHEMAS.SAMPLE_SETS.SCHEMA, sampleSetName) : undefined;
}
postSampleGrid(queryGridModel: QueryGridModel) : Promise<any> {
const editorModel = getEditorModel(queryGridModel.getId());
if (!editorModel) {
gridShowError(queryGridModel, {
message: 'Grid does not expose an editor. Ensure the grid is properly initialized for editing.'
});
return;
}
const rows = editorModel.getRawData(queryGridModel).valueSeq()
.reduce((rows, row) => rows.push(row.toMap()), List<Map<string, any>>());
// TODO: InsertRows responses are fragile and depend heavily on shape of data uploaded
return insertRows({
fillEmptyFields: true,
schemaQuery : this.getSchemaQuery(),
rows
})
};
static revertParentInputSchema(inputColumn: QueryColumn): SchemaQuery {
if (inputColumn.isExpInput()) {
const fieldKey = inputColumn.fieldKey.toLowerCase().split('/');
if (fieldKey.length === 2) {
let schemaName: string;
if (fieldKey[0] === QueryColumn.DATA_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.DATA_CLASSES.SCHEMA;
}
else if (fieldKey[0] === QueryColumn.MATERIAL_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.SAMPLE_SETS.SCHEMA;
}
else {
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey. "' + fieldKey[0] + '"');
}
return SchemaQuery.create(schemaName, fieldKey[1]);
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey length.');
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn.');
}
getGridValues(queryInfo: QueryInfo): Map<any, any> {
let data = List<Map<string, any>>();
for (let i = 0; i < this.sampleCount; i++) {
let values = Map<string, any>();
queryInfo
.getInsertColumns()
.forEach((col) => {
const colName = col.name;
if (col.isExpInput()) {
// Convert parent values into appropriate column names
const sq = SampleIdCreationModel.revertParentInputSchema(col);
// should be only one parent with the matching schema and query name
const selected = this.sampleParents.find((parent) => parent.schema === sq.schemaName && parent.query === sq.queryName);
if (selected && selected.value) {
values = values.set(colName, selected.value);
}
}
});
data = data.push(values);
}
return data.toOrderedMap();
}
}
export interface ISampleSetDetails {
isUpdate?: boolean
rowId?: number
name?: string
nameExpression?: string
description?: string
importAliasKeys?: Array<string>
importAliasValues?: Array<string>
}
export interface IParentAlias {
alias: string;
id: string; //generated by panel used for removal, not saved
parentValue: IParentOption;
}
| {
const input = {role, rowId};
if (isData) {
dataInputs.push(input);
}
else {
materialInputs.push(input);
}
} | conditional_block |
models.ts | /*
* Copyright (c) 2019 LabKey Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Ajax, Filter, Utils } from '@labkey/api';
import { List, Map, Record } from 'immutable';
import { Option } from 'react-select';
import { getEditorModel } from '../../global';
import { insertRows } from '../../query/api';
import { gridShowError } from '../../actions';
import { SCHEMAS } from '../base/models/schemas';
import { QueryColumn, QueryGridModel, QueryInfo, SchemaQuery } from '../base/models/model';
import { generateId } from '../../util/utils';
import { buildURL } from '../../url/ActionURL';
export interface SampleInputProps {
role: string
rowId: number
}
export interface IDerivePayload {
dataInputs?: Array<SampleInputProps>
materialDefault?: any
materialInputs?: Array<SampleInputProps>
materialOutputCount?: number
materialOutputs?: Array<{[key: string]: any}>
targetSampleSet: string
}
export interface IParentOption extends Option {
query?: string
schema?: string
}
export interface DisplayObject {
displayValue: any,
value: any
}
export class SampleSetParentType extends Record({
index: undefined,
key: undefined,
query: undefined,
schema: undefined,
value: undefined,
}) {
index: number;
key: string;
query: string;
schema: string;
value: List<DisplayObject>;
constructor(values?: any) {
super(values);
}
static create(values: any) {
if (!values.key)
values.key = generateId('parent-type-');
return new SampleSetParentType(values);
}
}
export interface ISampleSetOption extends Option {
lsid: string
rowId: number
}
export class SampleSetOption implements ISampleSetOption {
label: string;
lsid: string;
rowId: number;
value: any;
constructor(props?: Partial<SampleSetOption>) {
if (props) {
for (let k in props) {
this[k] = props[k];
}
}
}
}
interface MaterialOutput {
created: any
createdBy: string
id: number
lsid: string
modified: any
modifiedBy: string
name: string
properties: any
sampleSet: any
}
export class GenerateSampleResponse extends Record( {
data: undefined,
message: undefined,
success: false
}) {
data: {
materialOutputs: Array<MaterialOutput>
[key: string]: any
};
message: string;
success: boolean;
constructor(values?: any) {
super(values);
}
// Get all of the rowIds of the newly generated sampleIds (or the runs)
getFilter(): Filter.IFilter {
let filterColumn: string,
filterValue;
// data.id is the run rowId. If provided, create a filter based off the run instead of sampleIds.
if (this.data.id) {
filterColumn = 'Run/RowId';
filterValue = [this.data.id];
} else {
filterColumn = 'RowId';
// if a run id was not included, filter based on generated sample Ids.
filterValue = this.data.materialOutputs.map(val => val.id);
}
return Filter.create(filterColumn, filterValue, Filter.Types.IN);
}
}
export class SampleIdCreationModel extends Record({
errors: undefined,
initialSampleSet: undefined,
isError: false,
isInit: false,
parents: Array<string>(),
parentOptions: List<IParentOption>(),
sampleParents: List<SampleSetParentType>(),
sampleSetData: Map<string, any>(),
sampleSetOptions: List<ISampleSetOption>(),
selectionKey: undefined,
targetSampleSet: undefined,
sampleCount: 0
}) {
errors: Array<any>;
initialSampleSet: any;
isError: boolean;
isInit: boolean;
parents: Array<string>; // TODO should be 'originalParents'
parentOptions: List<IParentOption>;
sampleParents: List<SampleSetParentType>;
sampleSetData: Map<string, any>;
sampleSetOptions: List<ISampleSetOption>;
selectionKey: string;
targetSampleSet: SampleSetOption;
sampleCount: number;
constructor(values?: any) {
super(values);
}
hasTargetSampleSet() : boolean {
return !!(this.targetSampleSet && this.targetSampleSet.value);
}
| () : string {
return this.hasTargetSampleSet() ? this.targetSampleSet.value : undefined;
}
getSampleInputs(): {
dataInputs: Array<SampleInputProps>,
materialInputs: Array<SampleInputProps>
} {
let dataInputs: Array<SampleInputProps> = [],
materialInputs: Array<SampleInputProps> = [];
this.sampleParents.forEach((parent, index) => {
if (parent.value) {
const isData = parent.schema === SCHEMAS.DATA_CLASSES.SCHEMA;
const isSample = parent.schema === SCHEMAS.SAMPLE_SETS.SCHEMA;
if (isData || isSample) {
const role = isData ? 'data' : 'sample';
parent.value.forEach((option) => {
const rowId = parseInt(option.value);
if (!isNaN(rowId)) {
const input = {role, rowId};
if (isData) {
dataInputs.push(input);
}
else {
materialInputs.push(input);
}
}
else {
console.warn('SampleSet/actions/getSampleInputs -- Unable to parse rowId from "' + option.value + '" for ' + role + '.');
}
});
}
}
});
return {
dataInputs,
materialInputs
}
}
getSaveValues(): IDerivePayload {
const { dataInputs, materialInputs } = this.getSampleInputs();
let materialDefault = {};
return {
dataInputs,
materialDefault,
materialInputs,
targetSampleSet: this.targetSampleSet.lsid
};
}
getParentOptions(currentSelection: string): Array<any> {
// exclude options that have already been selected, except the current selection for this input
return this.parentOptions
.filter(o => (
this.sampleParents.every(parent => {
const notParentMatch = !parent.query || !Utils.caseInsensitiveEquals(parent.query, o.value);
const matchesCurrent = currentSelection && Utils.caseInsensitiveEquals(currentSelection, o.value);
return notParentMatch || matchesCurrent;
})
))
.toArray();
}
// Make the call to the Derive API
deriveSamples(materialOutputCount: number): Promise<GenerateSampleResponse> {
const { dataInputs, materialInputs, materialOutputs, materialDefault, targetSampleSet } = this.getSaveValues();
return new Promise((resolve, reject) => {
Ajax.request({
url: buildURL('experiment', 'derive.api'),
jsonData: {
dataInputs,
materialInputs,
targetSampleSet,
materialOutputCount,
materialOutputs,
materialDefault
},
success: Utils.getCallbackWrapper((response) => {
resolve(new GenerateSampleResponse(response));
}),
failure: Utils.getCallbackWrapper((error) => {
reject(error);
})
});
});
}
getSchemaQuery() {
const sampleSetName = this.getTargetSampleSetName();
return sampleSetName ? SchemaQuery.create(SCHEMAS.SAMPLE_SETS.SCHEMA, sampleSetName) : undefined;
}
postSampleGrid(queryGridModel: QueryGridModel) : Promise<any> {
const editorModel = getEditorModel(queryGridModel.getId());
if (!editorModel) {
gridShowError(queryGridModel, {
message: 'Grid does not expose an editor. Ensure the grid is properly initialized for editing.'
});
return;
}
const rows = editorModel.getRawData(queryGridModel).valueSeq()
.reduce((rows, row) => rows.push(row.toMap()), List<Map<string, any>>());
// TODO: InsertRows responses are fragile and depend heavily on shape of data uploaded
return insertRows({
fillEmptyFields: true,
schemaQuery : this.getSchemaQuery(),
rows
})
};
static revertParentInputSchema(inputColumn: QueryColumn): SchemaQuery {
if (inputColumn.isExpInput()) {
const fieldKey = inputColumn.fieldKey.toLowerCase().split('/');
if (fieldKey.length === 2) {
let schemaName: string;
if (fieldKey[0] === QueryColumn.DATA_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.DATA_CLASSES.SCHEMA;
}
else if (fieldKey[0] === QueryColumn.MATERIAL_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.SAMPLE_SETS.SCHEMA;
}
else {
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey. "' + fieldKey[0] + '"');
}
return SchemaQuery.create(schemaName, fieldKey[1]);
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey length.');
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn.');
}
getGridValues(queryInfo: QueryInfo): Map<any, any> {
let data = List<Map<string, any>>();
for (let i = 0; i < this.sampleCount; i++) {
let values = Map<string, any>();
queryInfo
.getInsertColumns()
.forEach((col) => {
const colName = col.name;
if (col.isExpInput()) {
// Convert parent values into appropriate column names
const sq = SampleIdCreationModel.revertParentInputSchema(col);
// should be only one parent with the matching schema and query name
const selected = this.sampleParents.find((parent) => parent.schema === sq.schemaName && parent.query === sq.queryName);
if (selected && selected.value) {
values = values.set(colName, selected.value);
}
}
});
data = data.push(values);
}
return data.toOrderedMap();
}
}
export interface ISampleSetDetails {
isUpdate?: boolean
rowId?: number
name?: string
nameExpression?: string
description?: string
importAliasKeys?: Array<string>
importAliasValues?: Array<string>
}
export interface IParentAlias {
alias: string;
id: string; //generated by panel used for removal, not saved
parentValue: IParentOption;
}
| getTargetSampleSetName | identifier_name |
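Rows in this dump end with two extra fields: the held-out middle span and its fim_type tag (here `getTargetSampleSetName` / `identifier_name`). The original file is recovered by splicing the middle back in at the split point. A minimal reconstruction sketch in Rust — the struct and the sample values are illustrative assumptions, not actual dataset fields or content:

```rust
/// Reassemble the original source text of one FIM row from its parts.
/// Field names mirror the columns used in these rows; the values in
/// `main` are placeholders, not real dataset content.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimRow {
    fn reconstruct(&self) -> String {
        // prefix + middle + suffix is the original file text.
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let row = FimRow {
        file_name: "models.ts".into(),
        prefix: "class Foo {\n    ".into(),
        middle: "getTargetSampleSetName".into(),
        suffix: "() : string { /* ... */ }\n}".into(),
        fim_type: "identifier_name".into(),
    };
    println!("{} ({})", row.file_name, row.fim_type);
    println!("{}", row.reconstruct());
}
```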
models.ts | /*
* Copyright (c) 2019 LabKey Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Ajax, Filter, Utils } from '@labkey/api';
import { List, Map, Record } from 'immutable';
import { Option } from 'react-select';
import { getEditorModel } from '../../global';
import { insertRows } from '../../query/api';
import { gridShowError } from '../../actions';
import { SCHEMAS } from '../base/models/schemas';
import { QueryColumn, QueryGridModel, QueryInfo, SchemaQuery } from '../base/models/model';
import { generateId } from '../../util/utils';
import { buildURL } from '../../url/ActionURL';
export interface SampleInputProps {
role: string
rowId: number
}
export interface IDerivePayload {
dataInputs?: Array<SampleInputProps>
materialDefault?: any
materialInputs?: Array<SampleInputProps>
materialOutputCount?: number
materialOutputs?: Array<{[key: string]: any}>
targetSampleSet: string
}
export interface IParentOption extends Option {
query?: string
schema?: string
}
export interface DisplayObject {
displayValue: any,
value: any
}
export class SampleSetParentType extends Record({
index: undefined,
key: undefined,
query: undefined,
schema: undefined,
value: undefined,
}) {
index: number;
key: string;
query: string;
schema: string;
value: List<DisplayObject>;
constructor(values?: any) {
super(values);
}
static create(values: any) {
if (!values.key)
values.key = generateId('parent-type-');
return new SampleSetParentType(values);
}
}
export interface ISampleSetOption extends Option {
lsid: string
rowId: number
}
export class SampleSetOption implements ISampleSetOption {
label: string;
lsid: string;
rowId: number;
value: any;
constructor(props?: Partial<SampleSetOption>) {
if (props) {
for (let k in props) {
this[k] = props[k];
}
}
}
}
interface MaterialOutput {
created: any
createdBy: string
id: number
lsid: string
modified: any
modifiedBy: string
name: string
properties: any
sampleSet: any
}
export class GenerateSampleResponse extends Record( {
data: undefined,
message: undefined,
success: false
}) {
data: {
materialOutputs: Array<MaterialOutput>
[key: string]: any
};
message: string;
success: boolean;
constructor(values?: any) {
super(values);
}
// Get all of the rowIds of the newly generated sampleIds (or the runs)
getFilter(): Filter.IFilter {
let filterColumn: string,
filterValue;
// data.id is the run rowId. If provided, create a filter based off the run instead of sampleIds.
if (this.data.id) {
filterColumn = 'Run/RowId';
filterValue = [this.data.id];
} else {
filterColumn = 'RowId';
// if a run id was not included, filter based on generated sample Ids.
filterValue = this.data.materialOutputs.map(val => val.id);
}
return Filter.create(filterColumn, filterValue, Filter.Types.IN);
}
}
export class SampleIdCreationModel extends Record({
errors: undefined,
initialSampleSet: undefined,
isError: false,
isInit: false,
parents: Array<string>(),
parentOptions: List<IParentOption>(),
sampleParents: List<SampleSetParentType>(),
sampleSetData: Map<string, any>(),
sampleSetOptions: List<ISampleSetOption>(),
selectionKey: undefined,
targetSampleSet: undefined,
sampleCount: 0
}) {
errors: Array<any>;
initialSampleSet: any;
isError: boolean;
isInit: boolean;
parents: Array<string>; // TODO should be 'originalParents'
parentOptions: List<IParentOption>;
sampleParents: List<SampleSetParentType>;
sampleSetData: Map<string, any>;
sampleSetOptions: List<ISampleSetOption>;
selectionKey: string;
targetSampleSet: SampleSetOption;
sampleCount: number;
constructor(values?: any) {
super(values);
}
hasTargetSampleSet() : boolean {
return this.targetSampleSet && this.targetSampleSet.value
}
getTargetSampleSetName() : string {
return this.hasTargetSampleSet() ? this.targetSampleSet.value : undefined;
}
getSampleInputs(): {
dataInputs: Array<SampleInputProps>,
materialInputs: Array<SampleInputProps>
} {
let dataInputs: Array<SampleInputProps> = [],
materialInputs: Array<SampleInputProps> = [];
this.sampleParents.forEach((parent, index) => {
if (parent.value) {
const isData = parent.schema === SCHEMAS.DATA_CLASSES.SCHEMA;
const isSample = parent.schema === SCHEMAS.SAMPLE_SETS.SCHEMA;
if (isData || isSample) {
const role = isData ? 'data' : 'sample';
parent.value.forEach((option) => {
const rowId = parseInt(option.value);
if (!isNaN(rowId)) {
const input = {role, rowId};
if (isData) {
dataInputs.push(input);
}
else {
materialInputs.push(input);
}
}
else {
console.warn('SampleSet/actions/getSampleInputs -- Unable to parse rowId from "' + option.value + '" for ' + role + '.');
}
});
}
}
});
return {
dataInputs,
materialInputs
}
}
getSaveValues(): IDerivePayload {
const { dataInputs, materialInputs } = this.getSampleInputs();
let materialDefault = {};
return {
dataInputs,
materialDefault,
materialInputs,
targetSampleSet: this.targetSampleSet.lsid
};
}
getParentOptions(currentSelection: string): Array<any> {
// exclude options that have already been selected, except the current selection for this input
return this.parentOptions
.filter(o => (
this.sampleParents.every(parent => {
const notParentMatch = !parent.query || !Utils.caseInsensitiveEquals(parent.query, o.value);
const matchesCurrent = currentSelection && Utils.caseInsensitiveEquals(currentSelection, o.value);
return notParentMatch || matchesCurrent;
})
))
.toArray();
}
// Make the call to the Derive API
deriveSamples(materialOutputCount: number): Promise<GenerateSampleResponse> {
const { dataInputs, materialInputs, materialOutputs, materialDefault, targetSampleSet } = this.getSaveValues();
return new Promise((resolve, reject) => {
Ajax.request({
url: buildURL('experiment', 'derive.api'),
jsonData: {
dataInputs,
materialInputs,
targetSampleSet,
materialOutputCount,
materialOutputs,
materialDefault
},
success: Utils.getCallbackWrapper((response) => {
resolve(new GenerateSampleResponse(response));
}),
failure: Utils.getCallbackWrapper((error) => {
reject(error);
})
});
});
}
getSchemaQuery() {
const sampleSetName = this.getTargetSampleSetName();
return sampleSetName ? SchemaQuery.create(SCHEMAS.SAMPLE_SETS.SCHEMA, sampleSetName) : undefined;
}
postSampleGrid(queryGridModel: QueryGridModel) : Promise<any> {
const editorModel = getEditorModel(queryGridModel.getId());
if (!editorModel) {
gridShowError(queryGridModel, {
message: 'Grid does not expose an editor. Ensure the grid is properly initialized for editing.'
});
return;
}
const rows = editorModel.getRawData(queryGridModel).valueSeq()
.reduce((rows, row) => rows.push(row.toMap()), List<Map<string, any>>());
// TODO: InsertRows responses are fragile and depend heavily on shape of data uploaded
return insertRows({
fillEmptyFields: true,
schemaQuery : this.getSchemaQuery(),
rows
})
};
static revertParentInputSchema(inputColumn: QueryColumn): SchemaQuery |
getGridValues(queryInfo: QueryInfo): Map<any, any> {
let data = List<Map<string, any>>();
for (let i = 0; i < this.sampleCount; i++) {
let values = Map<string, any>();
queryInfo
.getInsertColumns()
.forEach((col) => {
const colName = col.name;
if (col.isExpInput()) {
// Convert parent values into appropriate column names
const sq = SampleIdCreationModel.revertParentInputSchema(col);
// should be only one parent with the matching schema and query name
const selected = this.sampleParents.find((parent) => parent.schema === sq.schemaName && parent.query === sq.queryName);
if (selected && selected.value) {
values = values.set(colName, selected.value);
}
}
});
data = data.push(values);
}
return data.toOrderedMap();
}
}
export interface ISampleSetDetails {
isUpdate?: boolean
rowId?: number
name?: string
nameExpression?: string
description?: string
importAliasKeys?: Array<string>
importAliasValues?: Array<string>
}
export interface IParentAlias {
alias: string;
id: string; //generated by panel used for removal, not saved
parentValue: IParentOption;
}
| {
if (inputColumn.isExpInput()) {
const fieldKey = inputColumn.fieldKey.toLowerCase().split('/');
if (fieldKey.length === 2) {
let schemaName: string;
if (fieldKey[0] === QueryColumn.DATA_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.DATA_CLASSES.SCHEMA;
}
else if (fieldKey[0] === QueryColumn.MATERIAL_INPUTS.toLowerCase()) {
schemaName = SCHEMAS.SAMPLE_SETS.SCHEMA;
}
else {
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey. "' + fieldKey[0] + '"');
}
return SchemaQuery.create(schemaName, fieldKey[1]);
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn fieldKey length.');
}
throw new Error('SampleIdCreationModel.models.revertParentInputSchema -- invalid inputColumn.');
} | identifier_body |
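The middle recovered for this row (tagged `identifier_body`) is the body of `revertParentInputSchema`: lower-case the column's field key, split on `/`, require exactly two segments, and map `dataInputs`/`materialInputs` to the data-class or sample-set schema, throwing otherwise. A stand-alone restatement of that rule, re-expressed in Rust purely for illustration; the schema-name strings are placeholders, not the LabKey `SCHEMAS` constants:

```rust
/// Map an exp-input field key such as "MaterialInputs/Blood" to a
/// (schema, query) pair, mirroring the branching in the row above.
/// "exp.data" and "samples" are stand-in schema names.
fn revert_parent_input_schema(field_key: &str) -> Result<(String, String), String> {
    let lowered = field_key.to_lowercase();
    let parts: Vec<&str> = lowered.split('/').collect();
    if parts.len() != 2 {
        return Err(format!("invalid fieldKey length: {:?}", field_key));
    }
    let schema = match parts[0] {
        "datainputs" => "exp.data",    // stand-in for the data-classes schema
        "materialinputs" => "samples", // stand-in for the sample-sets schema
        other => return Err(format!("invalid fieldKey prefix: {:?}", other)),
    };
    Ok((schema.to_string(), parts[1].to_string()))
}

fn main() {
    println!("{:?}", revert_parent_input_schema("MaterialInputs/Blood"));
    println!("{:?}", revert_parent_input_schema("RowId"));
}
```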
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> |
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
| {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
} | identifier_body |
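The `identifier_body` middle of this row is the body of `init`, and one of its less obvious steps is picking the swapchain color format: prefer a supported format with an Srgb channel type, otherwise fall back to the first supported format, otherwise to a fixed default. A self-contained sketch of just that rule, with stand-in types rather than the gfx-hal `Format`/`ChannelType` enums:

```rust
/// Stand-ins for gfx-hal's format/channel types; only what the
/// selection rule needs.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Channel { Srgb, Unorm }

#[derive(Clone, Copy, Debug, PartialEq)]
struct Format { name: &'static str, channel: Channel }

const DEFAULT: Format = Format { name: "Rgba8Srgb", channel: Channel::Srgb };

/// Prefer an Srgb format, fall back to the first supported one,
/// and finally to a fixed default when nothing was reported.
fn pick_surface_format(supported: &[Format]) -> Format {
    let fallback = *supported.first().unwrap_or(&DEFAULT);
    supported
        .iter()
        .copied()
        .find(|f| f.channel == Channel::Srgb)
        .unwrap_or(fallback)
}

fn main() {
    let supported = [
        Format { name: "Bgra8Unorm", channel: Channel::Unorm },
        Format { name: "Bgra8Srgb", channel: Channel::Srgb },
    ];
    println!("{:?}", pick_surface_format(&supported));
    println!("{:?}", pick_surface_format(&[]));
}
```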
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass, | mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
} | },
);
pipeline_desc.blender.targets.push(ColorBlendDesc { | random_line_split |
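The `pipeline` helper in this row derives the vertex buffer stride and the per-attribute byte offsets from a list of per-attribute float counts. Here is that arithmetic in isolation, as a stand-alone sketch (the mapping of counts to `Format` variants such as `Rgb32Sfloat` is left out):

```rust
/// Given per-attribute component counts (in f32s), compute the vertex
/// stride in bytes and the byte offset of each attribute, mirroring the
/// loop in `pipeline` above.
fn attribute_layout(sizes: &[u32]) -> (u32, Vec<u32>) {
    let float = std::mem::size_of::<f32>() as u32;
    let stride = sizes.iter().sum::<u32>() * float;
    let mut offsets = Vec::with_capacity(sizes.len());
    let mut offset = 0;
    for &size in sizes {
        offsets.push(offset);
        offset += size * float;
    }
    (stride, offsets)
}

fn main() {
    // position (vec3), normal (vec3), uv (vec2)
    let (stride, offsets) = attribute_layout(&[3, 3, 2]);
    assert_eq!(stride, 32);
    assert_eq!(offsets, vec![0, 12, 24]);
    println!("stride = {}, offsets = {:?}", stride, offsets);
}
```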
easy.rs | use super::*;
use crate::utils::over;
pub fn init<B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else | ;
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
| { vec![] } | conditional_block |
easy.rs | use super::*;
use crate::utils::over;
pub fn | <B: Backend>(
window: &crate::windowing::window::Window,
name: &str,
version: u32,
) -> Result<
(
B::Instance,
B::Surface,
Format,
Adapter<B>,
B::Device,
QueueGroup<B>,
B::CommandPool,
),
&'static str,
> {
let instance = B::Instance::create(name, version).map_err(|_| "unsupported backend")?;
let surface = unsafe {
instance
.create_surface(window)
.map_err(|_| "create_surface failed")?
};
let adapter = instance.enumerate_adapters().remove(0);
let surface_color_format = {
use gfx_hal::format::ChannelType;
let supported_formats = surface
.supported_formats(&adapter.physical_device)
.unwrap_or(vec![]);
let default_format = *supported_formats.get(0).unwrap_or(&Format::Rgba8Srgb);
supported_formats
.into_iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.unwrap_or(default_format)
};
let (device, queue_group) = {
let queue_family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.ok_or("failed to find queue family")?;
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(queue_family, &[1.0])], gfx_hal::Features::empty())
.expect("Failed to open device")
};
(gpu.device, gpu.queue_groups.pop().unwrap())
};
let command_pool = unsafe {
use gfx_hal::pool::CommandPoolCreateFlags;
device
.create_command_pool(queue_group.family, CommandPoolCreateFlags::empty())
.expect("out of memory")
};
Ok((
instance,
surface,
surface_color_format,
adapter,
device,
queue_group,
command_pool,
))
}
pub fn desc_sets<B: Backend>(
device: &B::Device,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) -> (
B::DescriptorSetLayout,
B::DescriptorPool,
Vec<B::DescriptorSet>,
) {
use gfx_hal::pso::*;
let sets = values.len();
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
let mut binding_number = 0;
let mut bindings = vec![];
let mut ranges = vec![];
for _ in 0..ubos {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Buffer {
ty: BufferDescriptorType::Uniform,
format: BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..images {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Image {
ty: gfx_hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: sets,
});
binding_number += 1;
}
for _ in 0..samplers {
bindings.push(DescriptorSetLayoutBinding {
binding: binding_number,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
});
ranges.push(DescriptorRangeDesc {
ty: DescriptorType::Sampler,
count: sets,
});
binding_number += 1;
}
let (layout, pool, mut desc_sets) = unsafe {
let layout = device
.create_descriptor_set_layout(bindings.into_iter(), over([]))
.unwrap();
let mut pool = device
.create_descriptor_pool(sets, ranges.into_iter(), DescriptorPoolCreateFlags::empty())
.unwrap();
let mut desc_sets = Vec::with_capacity(sets);
for _ in 0..sets {
desc_sets.push(pool.allocate_one(&layout).unwrap());
}
(layout, pool, desc_sets)
};
write_desc_sets::<B>(device, desc_sets.iter_mut().collect(), values);
(layout, pool, desc_sets)
}
pub fn write_desc_sets<B: Backend>(
device: &B::Device,
desc_sets: Vec<&mut B::DescriptorSet>,
values: Vec<(Vec<&B::Buffer>, Vec<&B::ImageView>, Vec<&B::Sampler>)>,
) {
use gfx_hal::pso::*;
assert!(
desc_sets.len() == values.len() && !values.is_empty(),
"Must supply a matching, non-zero number of desc_sets and values"
);
let ubos = values.get(0).map(|set| set.0.len()).unwrap_or(0);
let images = values.get(0).map(|set| set.1.len()).unwrap_or(0);
let samplers = values.get(0).map(|set| set.2.len()).unwrap_or(0);
assert!(
values
.iter()
.all(|set| set.0.len() == ubos && set.1.len() == images && set.2.len() == samplers),
"All desc_sets must have the same layout of values"
);
for (set_values, desc_set) in values.into_iter().zip(desc_sets.into_iter()) {
use gfx_hal::buffer::SubRange;
let mut descriptors = Vec::with_capacity(ubos + images + samplers);
for buffer in set_values.0 {
descriptors.push(Descriptor::Buffer(buffer, SubRange::WHOLE));
}
for image in set_values.1 {
descriptors.push(Descriptor::Image(image, gfx_hal::image::Layout::Undefined));
}
for sampler in set_values.2 {
descriptors.push(Descriptor::Sampler(sampler));
}
unsafe {
if !descriptors.is_empty() {
device.write_descriptor_set(DescriptorSetWrite {
set: desc_set,
binding: 0,
array_offset: 0,
descriptors: descriptors.into_iter(),
});
}
}
}
}
pub fn render_pass<B: Backend>(
device: &B::Device,
surface_color_format: Format,
depth_format: Option<Format>,
intermediate: bool,
) -> B::RenderPass {
use gfx_hal::image::Layout;
use gfx_hal::pass::{
Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, SubpassDesc,
};
let end_layout = if intermediate {
Layout::ShaderReadOnlyOptimal
} else {
Layout::Present
};
let color_attachment = Attachment {
format: Some(surface_color_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
layouts: Layout::Undefined..end_layout,
};
let depth_attachment = depth_format.map(|surface_depth_format| Attachment {
format: Some(surface_depth_format),
samples: 1,
ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
stencil_ops: AttachmentOps::new(AttachmentLoadOp::Clear, AttachmentStoreOp::DontCare),
layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
});
let subpass = SubpassDesc {
colors: &[(0, Layout::ColorAttachmentOptimal)],
depth_stencil: depth_format.map(|_| &(1, Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
unsafe {
let attachments = match depth_attachment {
Some(depth_attachment) => vec![color_attachment, depth_attachment],
None => vec![color_attachment],
};
device
.create_render_pass(attachments.into_iter(), over([subpass]), over([]))
.expect("out of memory")
}
}
pub fn pipeline<B: SupportedBackend>(
device: &B::Device,
desc_layout: Option<&B::DescriptorSetLayout>,
push_constant_size: u32,
vs_bytes: &[u8],
fs_bytes: &[u8],
render_pass: &B::RenderPass,
depth_format: Option<Format>,
attribute_sizes: &[u32],
) -> (B::GraphicsPipeline, B::PipelineLayout) {
use gfx_hal::pso::*;
let push = vec![(
ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT,
0..push_constant_size,
)];
let push = if push_constant_size > 0 { push } else { vec![] };
let pipeline_layout = unsafe {
device
.create_pipeline_layout(desc_layout.into_iter(), push.into_iter())
.expect("out of memory")
};
let shader_modules = [(vs_bytes, false), (fs_bytes, true)]
.iter()
.map(|&(bytes, is_frag)| unsafe { B::make_shader_module(device, bytes, is_frag) })
.collect::<Vec<_>>();
let mut entries = shader_modules.iter().map(|module| EntryPoint::<B> {
entry: "main",
module,
specialization: Default::default(),
});
let stride = attribute_sizes.iter().sum::<u32>() * std::mem::size_of::<f32>() as u32;
let buffer_desc = if stride > 0 {
vec![VertexBufferDesc {
binding: 0,
stride,
rate: VertexInputRate::Vertex,
}]
} else {
vec![]
};
let mut offset = 0;
let mut attrs = vec![];
for (index, &size) in attribute_sizes.iter().enumerate() {
attrs.push(AttributeDesc {
location: index as u32,
binding: 0,
element: Element {
format: match size {
1 => Format::R32Sfloat,
2 => Format::Rg32Sfloat,
3 => Format::Rgb32Sfloat,
4 => Format::Rgba32Sfloat,
n => panic!("invalid attribute size {}", n),
},
offset,
},
});
offset += size * std::mem::size_of::<f32>() as u32;
}
let primitive_assembler = PrimitiveAssemblerDesc::Vertex {
buffers: &buffer_desc,
attributes: &attrs,
input_assembler: InputAssemblerDesc::new(Primitive::TriangleList),
vertex: entries.next().unwrap(),
tessellation: None,
geometry: None,
};
let mut pipeline_desc = GraphicsPipelineDesc::new(
primitive_assembler,
Rasterizer {
cull_face: Face::BACK,
..Rasterizer::FILL
},
entries.next(),
&pipeline_layout,
gfx_hal::pass::Subpass {
index: 0,
main_pass: &render_pass,
},
);
pipeline_desc.blender.targets.push(ColorBlendDesc {
mask: ColorMask::ALL,
blend: Some(BlendState::ALPHA),
});
if depth_format.is_some() {
pipeline_desc.depth_stencil = DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
};
}
let pipeline = unsafe {
let pipeline = device
.create_graphics_pipeline(&pipeline_desc, None)
.expect("failed to create graphics pipeline");
for module in shader_modules {
device.destroy_shader_module(module);
}
pipeline
};
(pipeline, pipeline_layout)
}
pub fn reconfigure_swapchain<B: Backend>(
surface: &mut B::Surface,
adapter: &Adapter<B>,
device: &B::Device,
surface_color_format: Format,
surface_extent: &mut gfx_hal::window::Extent2D,
) -> FramebufferAttachment {
use gfx_hal::window::SwapchainConfig;
let caps = surface.capabilities(&adapter.physical_device);
let mut swapchain_config =
SwapchainConfig::from_caps(&caps, surface_color_format, *surface_extent);
let framebuffer_attachment = swapchain_config.framebuffer_attachment();
// This seems to fix some fullscreen slowdown on macOS.
if caps.image_count.contains(&3) {
swapchain_config.image_count = 3;
}
*surface_extent = swapchain_config.extent;
unsafe {
surface
.configure_swapchain(device, swapchain_config)
.expect("failed to configure swapchain");
};
framebuffer_attachment
}
// TODO: Remove viewport pls
pub fn acquire_framebuffer<B: Backend>(
device: &B::Device,
surface: &mut B::Surface,
surface_extent: &gfx_hal::window::Extent2D,
render_pass: &B::RenderPass,
framebuffer_attachment: gfx_hal::image::FramebufferAttachment,
) -> Result<
(
B::Framebuffer,
<B::Surface as PresentationSurface<B>>::SwapchainImage,
gfx_hal::pso::Viewport,
),
(),
> {
let acquire_timeout_ns = 1_000_000_000;
match unsafe { surface.acquire_image(acquire_timeout_ns) } {
Ok((surface_image, _)) => unsafe {
use gfx_hal::image::Extent;
let framebuffer = device
.create_framebuffer(
render_pass,
over([framebuffer_attachment]),
Extent {
width: surface_extent.width,
height: surface_extent.height,
depth: 1,
},
)
.unwrap();
let viewport = {
use gfx_hal::pso::Rect;
Viewport {
rect: Rect {
x: 0,
y: 0,
w: surface_extent.width as i16,
h: surface_extent.height as i16,
},
depth: 0.0..1.0,
}
};
Ok((framebuffer, surface_image, viewport))
},
Err(_) => Err(()),
}
}
| init | identifier_name |
sql.go | package processor
import (
"database/sql"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/bloblang/field"
"github.com/Jeffail/benthos/v3/internal/bloblang/mapping"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/internal/tracing"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
// SQL Drivers
_ "github.com/ClickHouse/clickhouse-go"
_ "github.com/denisenkom/go-mssqldb"
_ "github.com/go-sql-driver/mysql"
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeSQL] = TypeSpec{
constructor: NewSQL,
Categories: []Category{
CategoryIntegration,
},
Status: docs.StatusDeprecated,
Summary: `
Runs an SQL prepared query against a target database for each message and, for
queries that return rows, replaces the message contents with the result according to a
[codec](#result-codecs).`,
Description: `
## Alternatives
Use either the ` + "[`sql_insert`](/docs/components/processors/sql_insert)" + ` or the ` + "[`sql_select`](/docs/components/processors/sql_select)" + ` processor instead.
If a query contains arguments they can be set as an array of strings supporting
[interpolation functions](/docs/configuration/interpolation#bloblang-queries) in
the ` + "`args`" + ` field.
## Drivers
The following is a list of supported drivers and their respective DSN formats:
| Driver | Data Source Name Format |
|---|---|
` + "| `clickhouse` | [`tcp://[netloc][:port][?param1=value1&...¶mN=valueN]`](https://github.com/ClickHouse/clickhouse-go#dsn)" + `
` + "| `mysql` | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` |" + `
` + "| `postgres` | `postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]` |" + `
` + "| `mssql` | `sqlserver://[user[:password]@][netloc][:port][?database=dbname¶m1=value1&...]` |" + `
Please note that the ` + "`postgres`" + ` driver enforces SSL by default; you
can override this with the parameter ` + "`sslmode=disable`" + ` if required.`,
Examples: []docs.AnnotatedExample{
{
Title: "Table Insert (MySQL)",
Summary: `
The following example inserts rows into the table footable with the columns foo,
bar and baz populated with values extracted from messages:`,
Config: `
pipeline:
processors:
- sql:
driver: mysql
data_source_name: foouser:foopassword@tcp(localhost:3306)/foodb
query: "INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);"
args_mapping: '[ document.foo, document.bar, meta("kafka_topic") ]'
`,
},
{
Title: "Table Query (PostgreSQL)",
Summary: `
Here we query a database for columns of footable that share a ` + "`user_id`" + `
with the message ` + "`user.id`" + `. The ` + "`result_codec`" + ` is set to
` + "`json_array`" + ` and a ` + "[`branch` processor](/docs/components/processors/branch)" + `
is used in order to insert the resulting array into the original message at the
path ` + "`foo_rows`" + `:`,
Config: `
pipeline:
processors:
- branch:
processors:
- sql:
driver: postgres
result_codec: json_array
data_source_name: postgres://foouser:foopass@localhost:5432/testdb?sslmode=disable
query: "SELECT * FROM footable WHERE user_id = $1;"
args_mapping: '[ this.user.id ]'
result_map: 'root.foo_rows = this'
`,
},
},
FieldSpecs: docs.FieldSpecs{
docs.FieldCommon(
"driver",
"A database [driver](#drivers) to use.",
).HasOptions("mysql", "postgres", "clickhouse", "mssql"),
docs.FieldCommon(
"data_source_name", "A Data Source Name to identify the target database.",
"tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000",
"foouser:foopassword@tcp(localhost:3306)/foodb",
"postgres://foouser:foopass@localhost:5432/foodb?sslmode=disable",
),
docs.FieldDeprecated("dsn", ""),
docs.FieldCommon(
"query", "The query to run against the database.",
"INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);",
),
docs.FieldBool(
"unsafe_dynamic_query",
"Whether to enable dynamic queries that support interpolation functions. WARNING: This feature opens up the possibility of SQL injection attacks and is considered unsafe.",
).Advanced().HasDefault(false),
docs.FieldDeprecated(
"args",
"A list of arguments for the query to be resolved for each message.",
).IsInterpolated().Array(),
docs.FieldBloblang(
"args_mapping",
"A [Bloblang mapping](/docs/guides/bloblang/about) that produces the arguments for the query. The mapping must return an array containing the number of arguments in the query.",
`[ this.foo, this.bar.not_empty().catch(null), meta("baz") ]`,
`root = [ uuid_v4() ].merge(this.document.args)`,
).AtVersion("3.47.0"),
docs.FieldCommon(
"result_codec",
"A [codec](#result-codecs) to determine how resulting rows are converted into messages.",
).HasOptions("none", "json_array"),
},
Footnotes: `
## Result Codecs
When a query returns rows they are serialised according to a chosen codec, and
the message contents are replaced with the serialised result.
### ` + "`none`" + `
The result of the query is ignored and the message remains unchanged. If your
query does not return rows then this is the appropriate codec.
### ` + "`json_array`" + `
The resulting rows are serialised into an array of JSON objects, where each
object represents a row, with each key being a column name and each value being
that column's value in the row.`,
}
}
//------------------------------------------------------------------------------
// SQLConfig contains configuration fields for the SQL processor.
type SQLConfig struct {
Driver string `json:"driver" yaml:"driver"`
DataSourceName string `json:"data_source_name" yaml:"data_source_name"`
DSN string `json:"dsn" yaml:"dsn"`
Query string `json:"query" yaml:"query"`
UnsafeDynamicQuery bool `json:"unsafe_dynamic_query" yaml:"unsafe_dynamic_query"`
Args []string `json:"args" yaml:"args"`
ArgsMapping string `json:"args_mapping" yaml:"args_mapping"`
ResultCodec string `json:"result_codec" yaml:"result_codec"`
}
// NewSQLConfig returns a SQLConfig with default values.
func NewSQLConfig() SQLConfig {
return SQLConfig{
Driver: "mysql",
DataSourceName: "",
DSN: "",
Query: "",
UnsafeDynamicQuery: false,
Args: []string{},
ArgsMapping: "",
ResultCodec: "none",
}
}
//------------------------------------------------------------------------------
// Some SQL drivers (such as clickhouse) require prepared inserts to be local to
// a transaction, rather than general.
func insertRequiresTransactionPrepare(driver string) bool {
_, exists := map[string]struct{}{
"clickhouse": {},
}[driver]
return exists
}
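// For such drivers NewSQL below leaves s.query nil, and doExecute prepares the
// statement inside each transaction via tx.Prepare instead of preparing it once
// up front.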
//------------------------------------------------------------------------------
// SQL is a processor that executes an SQL query for each message.
type SQL struct {
log log.Modular
stats metrics.Type
conf SQLConfig
db *sql.DB
dbMux sync.RWMutex
args []*field.Expression
argsMapping *mapping.Executor
resCodec sqlResultCodec
// TODO: V4 Remove this
deprecated bool
resCodecDeprecated sqlResultCodecDeprecated
queryStr string
dynQuery *field.Expression
query *sql.Stmt
closeChan chan struct{}
closedChan chan struct{}
closeOnce sync.Once
mCount metrics.StatCounter
mErr metrics.StatCounter
mSent metrics.StatCounter
mBatchSent metrics.StatCounter
}
// NewSQL returns a SQL processor.
func NewSQL(
conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
deprecated := false
dsn := conf.SQL.DataSourceName
if len(conf.SQL.DSN) > 0 {
if len(dsn) > 0 {
return nil, errors.New("specified both a deprecated `dsn` as well as a `data_source_name`")
}
dsn = conf.SQL.DSN
deprecated = true
}
if len(conf.SQL.Args) > 0 && conf.SQL.ArgsMapping != "" {
return nil, errors.New("cannot specify both `args` and an `args_mapping` in the same processor")
}
var argsMapping *mapping.Executor
if conf.SQL.ArgsMapping != "" {
if deprecated {
return nil, errors.New("the field `args_mapping` cannot be used when running the `sql` processor in deprecated mode (using the `dsn` field), use the `data_source_name` field instead")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
var err error
if argsMapping, err = interop.NewBloblangMapping(mgr, conf.SQL.ArgsMapping); err != nil {
return nil, fmt.Errorf("failed to parse `args_mapping`: %w", err)
}
}
var args []*field.Expression
for i, v := range conf.SQL.Args {
expr, err := interop.NewBloblangField(mgr, v)
if err != nil {
return nil, fmt.Errorf("failed to parse arg %v expression: %v", i, err)
}
args = append(args, expr)
}
if conf.SQL.Driver == "mssql" {
// For MSSQL, if the user part of the connection string is in the
// `DOMAIN\username` format, then the backslash character needs to be
// URL-encoded.
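// Illustrative example (made-up values): a user of `CORP\svc_user` is rewritten
// so the backslash becomes `%5C`, e.g.
// `sqlserver://CORP%5Csvc_user:pass@localhost?database=foodb`.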
dsn = strings.ReplaceAll(dsn, `\`, "%5C")
}
s := &SQL{
log: log,
stats: stats,
conf: conf.SQL,
args: args,
argsMapping: argsMapping,
queryStr: conf.SQL.Query,
deprecated: deprecated,
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
mCount: stats.GetCounter("count"),
mErr: stats.GetCounter("error"),
mSent: stats.GetCounter("sent"),
mBatchSent: stats.GetCounter("batch.sent"),
}
var err error
if deprecated {
s.log.Warnln("Using deprecated SQL functionality due to use of field 'dsn'. To switch to the new processor use the field 'data_source_name' instead. The new processor is not backwards compatible due to differences in how message batches are processed. For more information check out the docs at https://www.benthos.dev/docs/components/processors/sql.")
if conf.SQL.Driver != "mysql" && conf.SQL.Driver != "postgres" && conf.SQL.Driver != "mssql" {
return nil, fmt.Errorf("driver '%v' is not supported with deprecated SQL features (using field 'dsn')", conf.SQL.Driver)
}
if s.resCodecDeprecated, err = strToSQLResultCodecDeprecated(conf.SQL.ResultCodec); err != nil {
return nil, err
}
} else if s.resCodec, err = strToSQLResultCodec(conf.SQL.ResultCodec); err != nil {
return nil, err
}
if s.db, err = sql.Open(conf.SQL.Driver, dsn); err != nil {
return nil, err
}
if conf.SQL.UnsafeDynamicQuery {
if deprecated {
return nil, errors.New("cannot use dynamic queries when running in deprecated mode")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
if s.dynQuery, err = interop.NewBloblangField(mgr, s.queryStr); err != nil {
return nil, fmt.Errorf("failed to parse dynamic query expression: %v", err)
}
}
isSelectQuery := s.resCodecDeprecated != nil || s.resCodec != nil
// Some drivers only support transactional prepared inserts.
if s.dynQuery == nil && (isSelectQuery || !insertRequiresTransactionPrepare(conf.SQL.Driver)) {
if s.query, err = s.db.Prepare(s.queryStr); err != nil {
s.db.Close()
return nil, fmt.Errorf("failed to prepare query: %v", err)
}
}
go func() {
defer func() {
s.dbMux.Lock()
s.db.Close()
if s.query != nil {
s.query.Close()
}
s.dbMux.Unlock()
close(s.closedChan)
}()
<-s.closeChan
}()
return s, nil
}
//------------------------------------------------------------------------------
type sqlResultCodec func(rows *sql.Rows, part types.Part) error
func sqlResultJSONArrayCodec(rows *sql.Rows, part types.Part) error {
columnNames, err := rows.Columns()
if err != nil {
return err
}
jArray := []interface{}{}
for rows.Next() {
values := make([]interface{}, len(columnNames))
valuesWrapped := make([]interface{}, len(columnNames))
for i := range values {
valuesWrapped[i] = &values[i]
}
if err := rows.Scan(valuesWrapped...); err != nil {
return err
}
jObj := map[string]interface{}{}
for i, v := range values {
switch t := v.(type) {
case string:
jObj[columnNames[i]] = t
case []byte:
jObj[columnNames[i]] = string(t)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
jObj[columnNames[i]] = t
case float32, float64:
jObj[columnNames[i]] = t
case bool:
jObj[columnNames[i]] = t
default:
jObj[columnNames[i]] = t
}
}
jArray = append(jArray, jObj)
}
if err := rows.Err(); err != nil {
return err
}
return part.SetJSON(jArray)
}
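// Illustrative only (example data, not from the original source): a query such
// as `SELECT id, name FROM footable` matching two rows would set the part's
// structured contents to [{"id":1,"name":"foo"},{"id":2,"name":"bar"}].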
func strToSQLResultCodec(codec string) (sqlResultCodec, error) {
switch codec {
case "json_array":
return sqlResultJSONArrayCodec, nil
case "none":
return nil, nil
}
return nil, fmt.Errorf("unrecognised result codec: %v", codec)
}
//------------------------------------------------------------------------------
func (s *SQL) doExecute(argSets [][]interface{}) (errs []error) {
var err error
defer func() {
if err != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
for i := range errs {
if errs[i] == nil {
errs[i] = err
}
}
}
}()
var tx *sql.Tx
if tx, err = s.db.Begin(); err != nil {
return
}
stmt := s.query
if stmt == nil {
if stmt, err = tx.Prepare(s.queryStr); err != nil {
return
}
defer stmt.Close()
} else {
stmt = tx.Stmt(stmt)
}
for i, args := range argSets {
if len(args) == 0 {
continue
}
if _, serr := stmt.Exec(args...); serr != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
errs[i] = serr
}
}
err = tx.Commit()
return
}
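// Note: the returned slice is aligned with argSets so callers can flag the
// failing message part by index; an error from Begin, Prepare or Commit marks
// every entry via the deferred block above.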
func (s *SQL) getArgs(index int, msg types.Message) ([]interface{}, error) {
if len(s.args) > 0 {
args := make([]interface{}, len(s.args))
for i, v := range s.args {
args[i] = v.String(index, msg)
}
return args, nil
}
if s.argsMapping == nil {
return nil, nil
}
pargs, err := s.argsMapping.MapPart(index, msg)
if err != nil {
return nil, err
}
iargs, err := pargs.JSON()
if err != nil {
return nil, fmt.Errorf("mapping returned non-structured result: %w", err)
}
args, ok := iargs.([]interface{})
if !ok {
return nil, fmt.Errorf("mapping returned non-array result: %T", iargs)
}
return args, nil
}
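// Illustrative example (hypothetical message): with
// args_mapping: '[ this.user.id, meta("kafka_topic") ]', a part containing
// {"user":{"id":5}} read from topic "foo" yields an args slice equivalent to
// [5, "foo"].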
// ProcessMessage runs the configured query for each message of the batch and, when a result codec is selected, replaces message contents with the serialised result.
func (s *SQL) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
s.dbMux.RLock()
defer s.dbMux.RUnlock()
if s.deprecated {
return s.processMessageDeprecated(msg)
}
s.mCount.Incr(1)
newMsg := msg.Copy()
if s.resCodec == nil && s.dynQuery == nil {
argSets := make([][]interface{}, newMsg.Len())
newMsg.Iter(func(index int, p types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
FlagErr(newMsg.Get(index), err)
return nil
}
argSets[index] = args
return nil
})
for i, err := range s.doExecute(argSets) {
if err != nil |
}
} else {
IteratePartsWithSpanV2(TypeSQL, nil, newMsg, func(index int, span *tracing.Span, part types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
return err
}
if s.resCodec == nil {
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
_, err = s.db.Exec(queryStr, args...)
} else {
_, err = s.query.Exec(args...)
}
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
}
return nil
}
var rows *sql.Rows
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
rows, err = s.db.Query(queryStr, args...)
} else {
rows, err = s.query.Query(args...)
}
if err == nil {
defer rows.Close()
if err = s.resCodec(rows, part); err != nil {
err = fmt.Errorf("failed to apply result codec: %v", err)
}
} else {
err = fmt.Errorf("failed to execute query: %v", err)
}
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
return err
}
return nil
})
}
s.mBatchSent.Incr(1)
s.mSent.Incr(int64(newMsg.Len()))
msgs := [1]types.Message{newMsg}
return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
func (s *SQL) CloseAsync() {
s.closeOnce.Do(func() {
close(s.closeChan)
})
}
// WaitForClose blocks until the processor has closed down.
func (s *SQL) WaitForClose(timeout time.Duration) error {
select {
case <-time.After(timeout):
return types.ErrTimeout
case <-s.closedChan:
}
return nil
}
//------------------------------------------------------------------------------
| {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
FlagErr(newMsg.Get(i), err)
} | conditional_block |
sql.go | package processor
import (
"database/sql"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/bloblang/field"
"github.com/Jeffail/benthos/v3/internal/bloblang/mapping"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/internal/tracing"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
// SQL Drivers
_ "github.com/ClickHouse/clickhouse-go"
_ "github.com/denisenkom/go-mssqldb"
_ "github.com/go-sql-driver/mysql"
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeSQL] = TypeSpec{
constructor: NewSQL,
Categories: []Category{
CategoryIntegration,
},
Status: docs.StatusDeprecated,
Summary: `
Runs an SQL prepared query against a target database for each message and, for
queries that return rows, replaces the message contents with the result according to a
[codec](#result-codecs).`,
Description: `
## Alternatives
Use either the ` + "[`sql_insert`](/docs/components/processors/sql_insert)" + ` or the ` + "[`sql_select`](/docs/components/processors/sql_select)" + ` processor instead.
If a query contains arguments they can be set as an array of strings supporting
[interpolation functions](/docs/configuration/interpolation#bloblang-queries) in
the ` + "`args`" + ` field.
## Drivers
The following is a list of supported drivers and their respective DSN formats:
| Driver | Data Source Name Format |
|---|---|
` + "| `clickhouse` | [`tcp://[netloc][:port][?param1=value1&...¶mN=valueN]`](https://github.com/ClickHouse/clickhouse-go#dsn)" + `
` + "| `mysql` | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` |" + `
` + "| `postgres` | `postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]` |" + `
` + "| `mssql` | `sqlserver://[user[:password]@][netloc][:port][?database=dbname¶m1=value1&...]` |" + `
Please note that the ` + "`postgres`" + ` driver enforces SSL by default; you
can override this with the parameter ` + "`sslmode=disable`" + ` if required.`,
Examples: []docs.AnnotatedExample{
{
Title: "Table Insert (MySQL)",
Summary: `
The following example inserts rows into the table footable with the columns foo,
bar and baz populated with values extracted from messages:`,
Config: `
pipeline:
processors:
- sql:
driver: mysql
data_source_name: foouser:foopassword@tcp(localhost:3306)/foodb
query: "INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);"
args_mapping: '[ document.foo, document.bar, meta("kafka_topic") ]'
`,
},
{
Title: "Table Query (PostgreSQL)",
Summary: `
Here we query a database for columns of footable that share a ` + "`user_id`" + `
with the message ` + "`user.id`" + `. The ` + "`result_codec`" + ` is set to
` + "`json_array`" + ` and a ` + "[`branch` processor](/docs/components/processors/branch)" + `
is used in order to insert the resulting array into the original message at the
path ` + "`foo_rows`" + `:`,
Config: `
pipeline:
processors:
- branch:
processors:
- sql:
driver: postgres
result_codec: json_array
data_source_name: postgres://foouser:foopass@localhost:5432/testdb?sslmode=disable
query: "SELECT * FROM footable WHERE user_id = $1;"
args_mapping: '[ this.user.id ]'
result_map: 'root.foo_rows = this'
`,
},
},
FieldSpecs: docs.FieldSpecs{
docs.FieldCommon(
"driver",
"A database [driver](#drivers) to use.",
).HasOptions("mysql", "postgres", "clickhouse", "mssql"),
docs.FieldCommon(
"data_source_name", "A Data Source Name to identify the target database.",
"tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000",
"foouser:foopassword@tcp(localhost:3306)/foodb",
"postgres://foouser:foopass@localhost:5432/foodb?sslmode=disable",
),
docs.FieldDeprecated("dsn", ""),
docs.FieldCommon(
"query", "The query to run against the database.",
"INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);",
),
docs.FieldBool(
"unsafe_dynamic_query",
"Whether to enable dynamic queries that support interpolation functions. WARNING: This feature opens up the possibility of SQL injection attacks and is considered unsafe.",
).Advanced().HasDefault(false),
docs.FieldDeprecated(
"args",
"A list of arguments for the query to be resolved for each message.",
).IsInterpolated().Array(),
docs.FieldBloblang(
"args_mapping",
"A [Bloblang mapping](/docs/guides/bloblang/about) that produces the arguments for the query. The mapping must return an array containing the number of arguments in the query.",
`[ this.foo, this.bar.not_empty().catch(null), meta("baz") ]`,
`root = [ uuid_v4() ].merge(this.document.args)`,
).AtVersion("3.47.0"),
docs.FieldCommon(
"result_codec",
"A [codec](#result-codecs) to determine how resulting rows are converted into messages.",
).HasOptions("none", "json_array"),
},
Footnotes: `
## Result Codecs
When a query returns rows they are serialised according to a chosen codec, and
the message contents are replaced with the serialised result.
### ` + "`none`" + `
The result of the query is ignored and the message remains unchanged. If your
query does not return rows then this is the appropriate codec.
### ` + "`json_array`" + `
The resulting rows are serialised into an array of JSON objects, where each
object represents a row, with each key being a column name and each value being
that column's value in the row.`,
}
}
//------------------------------------------------------------------------------
// SQLConfig contains configuration fields for the SQL processor.
type SQLConfig struct {
Driver string `json:"driver" yaml:"driver"`
DataSourceName string `json:"data_source_name" yaml:"data_source_name"`
DSN string `json:"dsn" yaml:"dsn"`
Query string `json:"query" yaml:"query"`
UnsafeDynamicQuery bool `json:"unsafe_dynamic_query" yaml:"unsafe_dynamic_query"`
Args []string `json:"args" yaml:"args"`
ArgsMapping string `json:"args_mapping" yaml:"args_mapping"`
ResultCodec string `json:"result_codec" yaml:"result_codec"`
}
// NewSQLConfig returns a SQLConfig with default values.
func NewSQLConfig() SQLConfig {
return SQLConfig{
Driver: "mysql",
DataSourceName: "",
DSN: "",
Query: "",
UnsafeDynamicQuery: false,
Args: []string{},
ArgsMapping: "",
ResultCodec: "none",
}
}
//------------------------------------------------------------------------------
// Some SQL drivers (such as clickhouse) require prepared inserts to be local to
// a transaction, rather than general.
func insertRequiresTransactionPrepare(driver string) bool {
_, exists := map[string]struct{}{
"clickhouse": {},
}[driver]
return exists
}
//------------------------------------------------------------------------------
// SQL is a processor that executes an SQL query for each message.
type SQL struct {
log log.Modular
stats metrics.Type
conf SQLConfig
db *sql.DB
dbMux sync.RWMutex
args []*field.Expression
argsMapping *mapping.Executor
resCodec sqlResultCodec
// TODO: V4 Remove this
deprecated bool
resCodecDeprecated sqlResultCodecDeprecated
queryStr string
dynQuery *field.Expression
query *sql.Stmt
closeChan chan struct{}
closedChan chan struct{}
closeOnce sync.Once
mCount metrics.StatCounter
mErr metrics.StatCounter
mSent metrics.StatCounter
mBatchSent metrics.StatCounter
}
// NewSQL returns a SQL processor.
func NewSQL(
conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
deprecated := false
dsn := conf.SQL.DataSourceName
if len(conf.SQL.DSN) > 0 {
if len(dsn) > 0 {
return nil, errors.New("specified both a deprecated `dsn` as well as a `data_source_name`")
}
dsn = conf.SQL.DSN
deprecated = true
}
if len(conf.SQL.Args) > 0 && conf.SQL.ArgsMapping != "" {
return nil, errors.New("cannot specify both `args` and an `args_mapping` in the same processor")
}
var argsMapping *mapping.Executor
if conf.SQL.ArgsMapping != "" {
if deprecated {
return nil, errors.New("the field `args_mapping` cannot be used when running the `sql` processor in deprecated mode (using the `dsn` field), use the `data_source_name` field instead")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
var err error
if argsMapping, err = interop.NewBloblangMapping(mgr, conf.SQL.ArgsMapping); err != nil {
return nil, fmt.Errorf("failed to parse `args_mapping`: %w", err)
}
}
var args []*field.Expression
for i, v := range conf.SQL.Args {
expr, err := interop.NewBloblangField(mgr, v)
if err != nil {
return nil, fmt.Errorf("failed to parse arg %v expression: %v", i, err)
}
args = append(args, expr)
}
if conf.SQL.Driver == "mssql" {
// For MSSQL, if the user part of the connection string is in the
// `DOMAIN\username` format, then the backslash character needs to be
// URL-encoded.
dsn = strings.ReplaceAll(dsn, `\`, "%5C")
}
s := &SQL{
log: log,
stats: stats,
conf: conf.SQL,
args: args,
argsMapping: argsMapping,
queryStr: conf.SQL.Query,
deprecated: deprecated,
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
mCount: stats.GetCounter("count"),
mErr: stats.GetCounter("error"),
mSent: stats.GetCounter("sent"),
mBatchSent: stats.GetCounter("batch.sent"),
}
var err error
if deprecated {
s.log.Warnln("Using deprecated SQL functionality due to use of field 'dsn'. To switch to the new processor use the field 'data_source_name' instead. The new processor is not backwards compatible due to differences in how message batches are processed. For more information check out the docs at https://www.benthos.dev/docs/components/processors/sql.")
if conf.SQL.Driver != "mysql" && conf.SQL.Driver != "postgres" && conf.SQL.Driver != "mssql" {
return nil, fmt.Errorf("driver '%v' is not supported with deprecated SQL features (using field 'dsn')", conf.SQL.Driver)
}
if s.resCodecDeprecated, err = strToSQLResultCodecDeprecated(conf.SQL.ResultCodec); err != nil {
return nil, err
}
} else if s.resCodec, err = strToSQLResultCodec(conf.SQL.ResultCodec); err != nil {
return nil, err
}
if s.db, err = sql.Open(conf.SQL.Driver, dsn); err != nil {
return nil, err
}
if conf.SQL.UnsafeDynamicQuery {
if deprecated {
return nil, errors.New("cannot use dynamic queries when running in deprecated mode")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
if s.dynQuery, err = interop.NewBloblangField(mgr, s.queryStr); err != nil {
return nil, fmt.Errorf("failed to parse dynamic query expression: %v", err)
}
}
isSelectQuery := s.resCodecDeprecated != nil || s.resCodec != nil
// Some drivers only support transactional prepared inserts.
if s.dynQuery == nil && (isSelectQuery || !insertRequiresTransactionPrepare(conf.SQL.Driver)) {
if s.query, err = s.db.Prepare(s.queryStr); err != nil {
s.db.Close()
return nil, fmt.Errorf("failed to prepare query: %v", err)
}
}
go func() {
defer func() {
s.dbMux.Lock()
s.db.Close()
if s.query != nil {
s.query.Close()
}
s.dbMux.Unlock()
close(s.closedChan)
}()
<-s.closeChan
}()
return s, nil
}
//------------------------------------------------------------------------------
type sqlResultCodec func(rows *sql.Rows, part types.Part) error
func sqlResultJSONArrayCodec(rows *sql.Rows, part types.Part) error {
columnNames, err := rows.Columns()
if err != nil {
return err
}
jArray := []interface{}{}
for rows.Next() {
values := make([]interface{}, len(columnNames))
valuesWrapped := make([]interface{}, len(columnNames))
for i := range values {
valuesWrapped[i] = &values[i]
}
if err := rows.Scan(valuesWrapped...); err != nil {
return err
}
jObj := map[string]interface{}{}
for i, v := range values {
switch t := v.(type) {
case string:
jObj[columnNames[i]] = t
case []byte:
jObj[columnNames[i]] = string(t)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
jObj[columnNames[i]] = t
case float32, float64:
jObj[columnNames[i]] = t
case bool:
jObj[columnNames[i]] = t
default:
jObj[columnNames[i]] = t
}
}
jArray = append(jArray, jObj)
}
if err := rows.Err(); err != nil {
return err
}
return part.SetJSON(jArray)
}
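// Note: the []byte case above converts raw byte columns to strings so that text
// columns scanned as []byte (common with the MySQL driver) end up as readable
// JSON strings rather than base64-encoded values.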
func strToSQLResultCodec(codec string) (sqlResultCodec, error) {
switch codec {
case "json_array":
return sqlResultJSONArrayCodec, nil
case "none":
return nil, nil
}
return nil, fmt.Errorf("unrecognised result codec: %v", codec)
}
//------------------------------------------------------------------------------
func (s *SQL) doExecute(argSets [][]interface{}) (errs []error) {
var err error
defer func() {
if err != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
for i := range errs {
if errs[i] == nil {
errs[i] = err
}
}
}
}()
var tx *sql.Tx
if tx, err = s.db.Begin(); err != nil {
return
}
stmt := s.query
if stmt == nil {
if stmt, err = tx.Prepare(s.queryStr); err != nil {
return
}
defer stmt.Close()
} else {
stmt = tx.Stmt(stmt)
}
for i, args := range argSets {
if len(args) == 0 {
continue
}
if _, serr := stmt.Exec(args...); serr != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
errs[i] = serr
}
}
err = tx.Commit()
return
}
func (s *SQL) | (index int, msg types.Message) ([]interface{}, error) {
if len(s.args) > 0 {
args := make([]interface{}, len(s.args))
for i, v := range s.args {
args[i] = v.String(index, msg)
}
return args, nil
}
if s.argsMapping == nil {
return nil, nil
}
pargs, err := s.argsMapping.MapPart(index, msg)
if err != nil {
return nil, err
}
iargs, err := pargs.JSON()
if err != nil {
return nil, fmt.Errorf("mapping returned non-structured result: %w", err)
}
args, ok := iargs.([]interface{})
if !ok {
return nil, fmt.Errorf("mapping returned non-array result: %T", iargs)
}
return args, nil
}
// ProcessMessage runs the configured query for each message of the batch and, when a result codec is selected, replaces message contents with the serialised result.
func (s *SQL) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
s.dbMux.RLock()
defer s.dbMux.RUnlock()
if s.deprecated {
return s.processMessageDeprecated(msg)
}
s.mCount.Incr(1)
newMsg := msg.Copy()
if s.resCodec == nil && s.dynQuery == nil {
argSets := make([][]interface{}, newMsg.Len())
newMsg.Iter(func(index int, p types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
FlagErr(newMsg.Get(index), err)
return nil
}
argSets[index] = args
return nil
})
for i, err := range s.doExecute(argSets) {
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
FlagErr(newMsg.Get(i), err)
}
}
} else {
IteratePartsWithSpanV2(TypeSQL, nil, newMsg, func(index int, span *tracing.Span, part types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
return err
}
if s.resCodec == nil {
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
_, err = s.db.Exec(queryStr, args...)
} else {
_, err = s.query.Exec(args...)
}
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
}
return nil
}
var rows *sql.Rows
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
rows, err = s.db.Query(queryStr, args...)
} else {
rows, err = s.query.Query(args...)
}
if err == nil {
defer rows.Close()
if err = s.resCodec(rows, part); err != nil {
err = fmt.Errorf("failed to apply result codec: %v", err)
}
} else {
err = fmt.Errorf("failed to execute query: %v", err)
}
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
return err
}
return nil
})
}
s.mBatchSent.Incr(1)
s.mSent.Incr(int64(newMsg.Len()))
msgs := [1]types.Message{newMsg}
return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
func (s *SQL) CloseAsync() {
s.closeOnce.Do(func() {
close(s.closeChan)
})
}
// WaitForClose blocks until the processor has closed down.
func (s *SQL) WaitForClose(timeout time.Duration) error {
select {
case <-time.After(timeout):
return types.ErrTimeout
case <-s.closedChan:
}
return nil
}
//------------------------------------------------------------------------------
| getArgs | identifier_name |
sql.go | package processor
import (
"database/sql"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/bloblang/field"
"github.com/Jeffail/benthos/v3/internal/bloblang/mapping"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/internal/tracing"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
// SQL Drivers
_ "github.com/ClickHouse/clickhouse-go"
_ "github.com/denisenkom/go-mssqldb"
_ "github.com/go-sql-driver/mysql"
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeSQL] = TypeSpec{
constructor: NewSQL,
Categories: []Category{
CategoryIntegration,
},
Status: docs.StatusDeprecated,
Summary: `
Runs an SQL prepared query against a target database for each message and, for
queries that return rows, replaces the message contents with the result according to a
[codec](#result-codecs).`,
Description: `
## Alternatives
Use either the ` + "[`sql_insert`](/docs/components/processors/sql_insert)" + ` or the ` + "[`sql_select`](/docs/components/processors/sql_select)" + ` processor instead.
If a query contains arguments they can be set as an array of strings supporting
[interpolation functions](/docs/configuration/interpolation#bloblang-queries) in
the ` + "`args`" + ` field.
## Drivers
The following is a list of supported drivers and their respective DSN formats:
| Driver | Data Source Name Format |
|---|---|
` + "| `clickhouse` | [`tcp://[netloc][:port][?param1=value1&...¶mN=valueN]`](https://github.com/ClickHouse/clickhouse-go#dsn)" + `
` + "| `mysql` | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` |" + `
` + "| `postgres` | `postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]` |" + `
` + "| `mssql` | `sqlserver://[user[:password]@][netloc][:port][?database=dbname¶m1=value1&...]` |" + `
Please note that the ` + "`postgres`" + ` driver enforces SSL by default; you
can override this with the parameter ` + "`sslmode=disable`" + ` if required.`,
Examples: []docs.AnnotatedExample{
{
Title: "Table Insert (MySQL)",
Summary: `
The following example inserts rows into the table footable with the columns foo,
bar and baz populated with values extracted from messages:`,
Config: `
pipeline:
processors:
- sql:
driver: mysql
data_source_name: foouser:foopassword@tcp(localhost:3306)/foodb
query: "INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);"
args_mapping: '[ document.foo, document.bar, meta("kafka_topic") ]'
`,
},
{
Title: "Table Query (PostgreSQL)",
Summary: `
Here we query a database for columns of footable that share a ` + "`user_id`" + `
with the message ` + "`user.id`" + `. The ` + "`result_codec`" + ` is set to
` + "`json_array`" + ` and a ` + "[`branch` processor](/docs/components/processors/branch)" + `
is used in order to insert the resulting array into the original message at the
path ` + "`foo_rows`" + `:`,
Config: `
pipeline:
processors:
- branch:
processors:
- sql:
driver: postgres
result_codec: json_array
data_source_name: postgres://foouser:foopass@localhost:5432/testdb?sslmode=disable
query: "SELECT * FROM footable WHERE user_id = $1;"
args_mapping: '[ this.user.id ]'
result_map: 'root.foo_rows = this'
`,
},
},
FieldSpecs: docs.FieldSpecs{
docs.FieldCommon(
"driver",
"A database [driver](#drivers) to use.",
).HasOptions("mysql", "postgres", "clickhouse", "mssql"),
docs.FieldCommon(
"data_source_name", "A Data Source Name to identify the target database.",
"tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000",
"foouser:foopassword@tcp(localhost:3306)/foodb",
"postgres://foouser:foopass@localhost:5432/foodb?sslmode=disable",
),
docs.FieldDeprecated("dsn", ""),
docs.FieldCommon(
"query", "The query to run against the database.",
"INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);",
),
docs.FieldBool(
"unsafe_dynamic_query",
"Whether to enable dynamic queries that support interpolation functions. WARNING: This feature opens up the possibility of SQL injection attacks and is considered unsafe.",
).Advanced().HasDefault(false),
docs.FieldDeprecated(
"args",
"A list of arguments for the query to be resolved for each message.",
).IsInterpolated().Array(),
docs.FieldBloblang(
"args_mapping",
"A [Bloblang mapping](/docs/guides/bloblang/about) that produces the arguments for the query. The mapping must return an array containing the number of arguments in the query.",
`[ this.foo, this.bar.not_empty().catch(null), meta("baz") ]`,
`root = [ uuid_v4() ].merge(this.document.args)`,
).AtVersion("3.47.0"),
docs.FieldCommon(
"result_codec",
"A [codec](#result-codecs) to determine how resulting rows are converted into messages.",
).HasOptions("none", "json_array"),
},
Footnotes: `
## Result Codecs
When a query returns rows they are serialised according to a chosen codec, and
the message contents are replaced with the serialised result.
### ` + "`none`" + `
The result of the query is ignored and the message remains unchanged. If your
query does not return rows then this is the appropriate codec.
### ` + "`json_array`" + `
The resulting rows are serialised into an array of JSON objects, where each
object represents a row, with each key being a column name and each value being
that column's value in the row.`,
}
}
//------------------------------------------------------------------------------
// SQLConfig contains configuration fields for the SQL processor.
type SQLConfig struct {
Driver string `json:"driver" yaml:"driver"`
DataSourceName string `json:"data_source_name" yaml:"data_source_name"`
DSN string `json:"dsn" yaml:"dsn"`
Query string `json:"query" yaml:"query"`
UnsafeDynamicQuery bool `json:"unsafe_dynamic_query" yaml:"unsafe_dynamic_query"`
Args []string `json:"args" yaml:"args"`
ArgsMapping string `json:"args_mapping" yaml:"args_mapping"`
ResultCodec string `json:"result_codec" yaml:"result_codec"`
}
// NewSQLConfig returns a SQLConfig with default values.
func NewSQLConfig() SQLConfig {
return SQLConfig{
Driver: "mysql",
DataSourceName: "",
DSN: "",
Query: "",
UnsafeDynamicQuery: false,
Args: []string{},
ArgsMapping: "",
ResultCodec: "none",
}
}
//------------------------------------------------------------------------------
// Some SQL drivers (such as clickhouse) require prepared inserts to be local to
// a transaction, rather than general.
func insertRequiresTransactionPrepare(driver string) bool |
//------------------------------------------------------------------------------
// SQL is a processor that executes an SQL query for each message.
type SQL struct {
log log.Modular
stats metrics.Type
conf SQLConfig
db *sql.DB
dbMux sync.RWMutex
args []*field.Expression
argsMapping *mapping.Executor
resCodec sqlResultCodec
// TODO: V4 Remove this
deprecated bool
resCodecDeprecated sqlResultCodecDeprecated
queryStr string
dynQuery *field.Expression
query *sql.Stmt
closeChan chan struct{}
closedChan chan struct{}
closeOnce sync.Once
mCount metrics.StatCounter
mErr metrics.StatCounter
mSent metrics.StatCounter
mBatchSent metrics.StatCounter
}
// NewSQL returns a SQL processor.
func NewSQL(
conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
deprecated := false
dsn := conf.SQL.DataSourceName
if len(conf.SQL.DSN) > 0 {
if len(dsn) > 0 {
return nil, errors.New("specified both a deprecated `dsn` as well as a `data_source_name`")
}
dsn = conf.SQL.DSN
deprecated = true
}
if len(conf.SQL.Args) > 0 && conf.SQL.ArgsMapping != "" {
return nil, errors.New("cannot specify both `args` and an `args_mapping` in the same processor")
}
var argsMapping *mapping.Executor
if conf.SQL.ArgsMapping != "" {
if deprecated {
return nil, errors.New("the field `args_mapping` cannot be used when running the `sql` processor in deprecated mode (using the `dsn` field), use the `data_source_name` field instead")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
var err error
if argsMapping, err = interop.NewBloblangMapping(mgr, conf.SQL.ArgsMapping); err != nil {
return nil, fmt.Errorf("failed to parse `args_mapping`: %w", err)
}
}
var args []*field.Expression
for i, v := range conf.SQL.Args {
expr, err := interop.NewBloblangField(mgr, v)
if err != nil {
return nil, fmt.Errorf("failed to parse arg %v expression: %v", i, err)
}
args = append(args, expr)
}
if conf.SQL.Driver == "mssql" {
// For MSSQL, if the user part of the connection string is in the
// `DOMAIN\username` format, then the backslash character needs to be
// URL-encoded.
dsn = strings.ReplaceAll(dsn, `\`, "%5C")
}
s := &SQL{
log: log,
stats: stats,
conf: conf.SQL,
args: args,
argsMapping: argsMapping,
queryStr: conf.SQL.Query,
deprecated: deprecated,
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
mCount: stats.GetCounter("count"),
mErr: stats.GetCounter("error"),
mSent: stats.GetCounter("sent"),
mBatchSent: stats.GetCounter("batch.sent"),
}
var err error
if deprecated {
s.log.Warnln("Using deprecated SQL functionality due to use of field 'dsn'. To switch to the new processor use the field 'data_source_name' instead. The new processor is not backwards compatible due to differences in how message batches are processed. For more information check out the docs at https://www.benthos.dev/docs/components/processors/sql.")
if conf.SQL.Driver != "mysql" && conf.SQL.Driver != "postgres" && conf.SQL.Driver != "mssql" {
return nil, fmt.Errorf("driver '%v' is not supported with deprecated SQL features (using field 'dsn')", conf.SQL.Driver)
}
if s.resCodecDeprecated, err = strToSQLResultCodecDeprecated(conf.SQL.ResultCodec); err != nil {
return nil, err
}
} else if s.resCodec, err = strToSQLResultCodec(conf.SQL.ResultCodec); err != nil {
return nil, err
}
if s.db, err = sql.Open(conf.SQL.Driver, dsn); err != nil {
return nil, err
}
if conf.SQL.UnsafeDynamicQuery {
if deprecated {
return nil, errors.New("cannot use dynamic queries when running in deprecated mode")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
if s.dynQuery, err = interop.NewBloblangField(mgr, s.queryStr); err != nil {
return nil, fmt.Errorf("failed to parse dynamic query expression: %v", err)
}
}
isSelectQuery := s.resCodecDeprecated != nil || s.resCodec != nil
// Some drivers only support transactional prepared inserts.
if s.dynQuery == nil && (isSelectQuery || !insertRequiresTransactionPrepare(conf.SQL.Driver)) {
if s.query, err = s.db.Prepare(s.queryStr); err != nil {
s.db.Close()
return nil, fmt.Errorf("failed to prepare query: %v", err)
}
}
go func() {
defer func() {
s.dbMux.Lock()
s.db.Close()
if s.query != nil {
s.query.Close()
}
s.dbMux.Unlock()
close(s.closedChan)
}()
<-s.closeChan
}()
return s, nil
}
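// Lifecycle note: CloseAsync closes closeChan, which lets the goroutine above
// take the write lock, close the DB handle and any prepared statement, and then
// signal closedChan for WaitForClose; in-flight ProcessMessage calls finish
// first because they hold the read lock.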
//------------------------------------------------------------------------------
type sqlResultCodec func(rows *sql.Rows, part types.Part) error
func sqlResultJSONArrayCodec(rows *sql.Rows, part types.Part) error {
columnNames, err := rows.Columns()
if err != nil {
return err
}
jArray := []interface{}{}
for rows.Next() {
values := make([]interface{}, len(columnNames))
valuesWrapped := make([]interface{}, len(columnNames))
for i := range values {
valuesWrapped[i] = &values[i]
}
if err := rows.Scan(valuesWrapped...); err != nil {
return err
}
jObj := map[string]interface{}{}
for i, v := range values {
switch t := v.(type) {
case string:
jObj[columnNames[i]] = t
case []byte:
jObj[columnNames[i]] = string(t)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
jObj[columnNames[i]] = t
case float32, float64:
jObj[columnNames[i]] = t
case bool:
jObj[columnNames[i]] = t
default:
jObj[columnNames[i]] = t
}
}
jArray = append(jArray, jObj)
}
if err := rows.Err(); err != nil {
return err
}
return part.SetJSON(jArray)
}
func strToSQLResultCodec(codec string) (sqlResultCodec, error) {
switch codec {
case "json_array":
return sqlResultJSONArrayCodec, nil
case "none":
return nil, nil
}
return nil, fmt.Errorf("unrecognised result codec: %v", codec)
}
//------------------------------------------------------------------------------
func (s *SQL) doExecute(argSets [][]interface{}) (errs []error) {
var err error
defer func() {
if err != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
for i := range errs {
if errs[i] == nil {
errs[i] = err
}
}
}
}()
var tx *sql.Tx
if tx, err = s.db.Begin(); err != nil {
return
}
stmt := s.query
if stmt == nil {
if stmt, err = tx.Prepare(s.queryStr); err != nil {
return
}
defer stmt.Close()
} else {
stmt = tx.Stmt(stmt)
}
for i, args := range argSets {
if len(args) == 0 {
continue
}
if _, serr := stmt.Exec(args...); serr != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
errs[i] = serr
}
}
err = tx.Commit()
return
}
func (s *SQL) getArgs(index int, msg types.Message) ([]interface{}, error) {
if len(s.args) > 0 {
args := make([]interface{}, len(s.args))
for i, v := range s.args {
args[i] = v.String(index, msg)
}
return args, nil
}
if s.argsMapping == nil {
return nil, nil
}
pargs, err := s.argsMapping.MapPart(index, msg)
if err != nil {
return nil, err
}
iargs, err := pargs.JSON()
if err != nil {
return nil, fmt.Errorf("mapping returned non-structured result: %w", err)
}
args, ok := iargs.([]interface{})
if !ok {
return nil, fmt.Errorf("mapping returned non-array result: %T", iargs)
}
return args, nil
}
// ProcessMessage runs the configured query for each message of the batch and, when a result codec is selected, replaces message contents with the serialised result.
func (s *SQL) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
s.dbMux.RLock()
defer s.dbMux.RUnlock()
if s.deprecated {
return s.processMessageDeprecated(msg)
}
s.mCount.Incr(1)
newMsg := msg.Copy()
if s.resCodec == nil && s.dynQuery == nil {
argSets := make([][]interface{}, newMsg.Len())
newMsg.Iter(func(index int, p types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
FlagErr(newMsg.Get(index), err)
return nil
}
argSets[index] = args
return nil
})
for i, err := range s.doExecute(argSets) {
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
FlagErr(newMsg.Get(i), err)
}
}
} else {
IteratePartsWithSpanV2(TypeSQL, nil, newMsg, func(index int, span *tracing.Span, part types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
return err
}
if s.resCodec == nil {
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
_, err = s.db.Exec(queryStr, args...)
} else {
_, err = s.query.Exec(args...)
}
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
}
return nil
}
var rows *sql.Rows
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
rows, err = s.db.Query(queryStr, args...)
} else {
rows, err = s.query.Query(args...)
}
if err == nil {
defer rows.Close()
if err = s.resCodec(rows, part); err != nil {
err = fmt.Errorf("failed to apply result codec: %v", err)
}
} else {
err = fmt.Errorf("failed to execute query: %v", err)
}
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
return err
}
return nil
})
}
s.mBatchSent.Incr(1)
s.mSent.Incr(int64(newMsg.Len()))
msgs := [1]types.Message{newMsg}
return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
func (s *SQL) CloseAsync() {
s.closeOnce.Do(func() {
close(s.closeChan)
})
}
// WaitForClose blocks until the processor has closed down.
func (s *SQL) WaitForClose(timeout time.Duration) error {
select {
case <-time.After(timeout):
return types.ErrTimeout
case <-s.closedChan:
}
return nil
}
//------------------------------------------------------------------------------
| {
_, exists := map[string]struct{}{
"clickhouse": {},
}[driver]
return exists
} | identifier_body |
sql.go | package processor
import (
"database/sql"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/bloblang/field"
"github.com/Jeffail/benthos/v3/internal/bloblang/mapping"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/internal/tracing"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
// SQL Drivers
_ "github.com/ClickHouse/clickhouse-go"
_ "github.com/denisenkom/go-mssqldb"
_ "github.com/go-sql-driver/mysql"
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeSQL] = TypeSpec{
constructor: NewSQL,
Categories: []Category{
CategoryIntegration,
},
Status: docs.StatusDeprecated,
Summary: `
Runs an SQL prepared query against a target database for each message and, for
queries that return rows, replaces the message contents with the result according to a
[codec](#result-codecs).`,
Description: `
## Alternatives
Use either the ` + "[`sql_insert`](/docs/components/processors/sql_insert)" + ` or the ` + "[`sql_select`](/docs/components/processors/sql_select)" + ` processor instead.
If a query contains arguments they can be set as an array of strings supporting
[interpolation functions](/docs/configuration/interpolation#bloblang-queries) in
the ` + "`args`" + ` field.
## Drivers
The following is a list of supported drivers and their respective DSN formats:
| Driver | Data Source Name Format |
|---|---|
` + "| `clickhouse` | [`tcp://[netloc][:port][?param1=value1&...¶mN=valueN]`](https://github.com/ClickHouse/clickhouse-go#dsn)" + `
` + "| `mysql` | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` |" + `
` + "| `postgres` | `postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]` |" + `
` + "| `mssql` | `sqlserver://[user[:password]@][netloc][:port][?database=dbname¶m1=value1&...]` |" + `
Please note that the ` + "`postgres`" + ` driver enforces SSL by default; you
can override this with the parameter ` + "`sslmode=disable`" + ` if required.`,
Examples: []docs.AnnotatedExample{
{
Title: "Table Insert (MySQL)",
Summary: `
The following example inserts rows into the table footable with the columns foo,
bar and baz populated with values extracted from messages:`,
Config: `
pipeline:
processors:
- sql:
driver: mysql
data_source_name: foouser:foopassword@tcp(localhost:3306)/foodb
query: "INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);"
args_mapping: '[ document.foo, document.bar, meta("kafka_topic") ]'
`,
},
{
Title: "Table Query (PostgreSQL)",
Summary: `
Here we query a database for columns of footable that share a ` + "`user_id`" + `
with the message ` + "`user.id`" + `. The ` + "`result_codec`" + ` is set to
` + "`json_array`" + ` and a ` + "[`branch` processor](/docs/components/processors/branch)" + `
is used in order to insert the resulting array into the original message at the
path ` + "`foo_rows`" + `:`,
Config: `
pipeline:
processors:
- branch:
processors:
- sql:
driver: postgres
result_codec: json_array
data_source_name: postgres://foouser:foopass@localhost:5432/testdb?sslmode=disable
query: "SELECT * FROM footable WHERE user_id = $1;"
args_mapping: '[ this.user.id ]'
result_map: 'root.foo_rows = this'
`,
},
},
FieldSpecs: docs.FieldSpecs{
docs.FieldCommon(
"driver",
"A database [driver](#drivers) to use.",
).HasOptions("mysql", "postgres", "clickhouse", "mssql"),
docs.FieldCommon(
"data_source_name", "A Data Source Name to identify the target database.",
"tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000",
"foouser:foopassword@tcp(localhost:3306)/foodb",
"postgres://foouser:foopass@localhost:5432/foodb?sslmode=disable",
),
docs.FieldDeprecated("dsn", ""),
docs.FieldCommon(
"query", "The query to run against the database.",
"INSERT INTO footable (foo, bar, baz) VALUES (?, ?, ?);",
),
docs.FieldBool(
"unsafe_dynamic_query",
"Whether to enable dynamic queries that support interpolation functions. WARNING: This feature opens up the possibility of SQL injection attacks and is considered unsafe.",
).Advanced().HasDefault(false),
docs.FieldDeprecated(
"args",
"A list of arguments for the query to be resolved for each message.",
).IsInterpolated().Array(),
docs.FieldBloblang(
"args_mapping",
"A [Bloblang mapping](/docs/guides/bloblang/about) that produces the arguments for the query. The mapping must return an array containing the number of arguments in the query.",
`[ this.foo, this.bar.not_empty().catch(null), meta("baz") ]`,
`root = [ uuid_v4() ].merge(this.document.args)`,
).AtVersion("3.47.0"),
docs.FieldCommon(
"result_codec",
"A [codec](#result-codecs) to determine how resulting rows are converted into messages.",
).HasOptions("none", "json_array"),
},
Footnotes: `
## Result Codecs
When a query returns rows they are serialised according to a chosen codec, and
the message contents are replaced with the serialised result.
### ` + "`none`" + `
The result of the query is ignored and the message remains unchanged. If your
query does not return rows then this is the appropriate codec.
### ` + "`json_array`" + `
The resulting rows are serialised into an array of JSON objects, where each
object represents a row, where the key is the column name and the value is that | }
//------------------------------------------------------------------------------
// SQLConfig contains configuration fields for the SQL processor.
type SQLConfig struct {
Driver string `json:"driver" yaml:"driver"`
DataSourceName string `json:"data_source_name" yaml:"data_source_name"`
DSN string `json:"dsn" yaml:"dsn"`
Query string `json:"query" yaml:"query"`
UnsafeDynamicQuery bool `json:"unsafe_dynamic_query" yaml:"unsafe_dynamic_query"`
Args []string `json:"args" yaml:"args"`
ArgsMapping string `json:"args_mapping" yaml:"args_mapping"`
ResultCodec string `json:"result_codec" yaml:"result_codec"`
}
// NewSQLConfig returns a SQLConfig with default values.
func NewSQLConfig() SQLConfig {
return SQLConfig{
Driver: "mysql",
DataSourceName: "",
DSN: "",
Query: "",
UnsafeDynamicQuery: false,
Args: []string{},
ArgsMapping: "",
ResultCodec: "none",
}
}
//------------------------------------------------------------------------------
// Some SQL drivers (such as clickhouse) require prepared inserts to be local to
// a transaction, rather than general.
func insertRequiresTransactionPrepare(driver string) bool {
_, exists := map[string]struct{}{
"clickhouse": {},
}[driver]
return exists
}
//------------------------------------------------------------------------------
// SQL is a processor that executes an SQL query for each message.
type SQL struct {
log log.Modular
stats metrics.Type
conf SQLConfig
db *sql.DB
dbMux sync.RWMutex
args []*field.Expression
argsMapping *mapping.Executor
resCodec sqlResultCodec
// TODO: V4 Remove this
deprecated bool
resCodecDeprecated sqlResultCodecDeprecated
queryStr string
dynQuery *field.Expression
query *sql.Stmt
closeChan chan struct{}
closedChan chan struct{}
closeOnce sync.Once
mCount metrics.StatCounter
mErr metrics.StatCounter
mSent metrics.StatCounter
mBatchSent metrics.StatCounter
}
// NewSQL returns a SQL processor.
func NewSQL(
conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
deprecated := false
dsn := conf.SQL.DataSourceName
if len(conf.SQL.DSN) > 0 {
if len(dsn) > 0 {
return nil, errors.New("specified both a deprecated `dsn` as well as a `data_source_name`")
}
dsn = conf.SQL.DSN
deprecated = true
}
if len(conf.SQL.Args) > 0 && conf.SQL.ArgsMapping != "" {
return nil, errors.New("cannot specify both `args` and an `args_mapping` in the same processor")
}
var argsMapping *mapping.Executor
if conf.SQL.ArgsMapping != "" {
if deprecated {
return nil, errors.New("the field `args_mapping` cannot be used when running the `sql` processor in deprecated mode (using the `dsn` field), use the `data_source_name` field instead")
}
log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
var err error
if argsMapping, err = interop.NewBloblangMapping(mgr, conf.SQL.ArgsMapping); err != nil {
return nil, fmt.Errorf("failed to parse `args_mapping`: %w", err)
}
}
var args []*field.Expression
for i, v := range conf.SQL.Args {
expr, err := interop.NewBloblangField(mgr, v)
if err != nil {
return nil, fmt.Errorf("failed to parse arg %v expression: %v", i, err)
}
args = append(args, expr)
}
if conf.SQL.Driver == "mssql" {
// For MSSQL, if the user part of the connection string is in the
// `DOMAIN\username` format, then the backslash character needs to be
// URL-encoded.
dsn = strings.ReplaceAll(dsn, `\`, "%5C")
}
s := &SQL{
log: log,
stats: stats,
conf: conf.SQL,
args: args,
argsMapping: argsMapping,
queryStr: conf.SQL.Query,
deprecated: deprecated,
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
mCount: stats.GetCounter("count"),
mErr: stats.GetCounter("error"),
mSent: stats.GetCounter("sent"),
mBatchSent: stats.GetCounter("batch.sent"),
}
var err error
if deprecated {
s.log.Warnln("Using deprecated SQL functionality due to use of field 'dsn'. To switch to the new processor use the field 'data_source_name' instead. The new processor is not backwards compatible due to differences in how message batches are processed. For more information check out the docs at https://www.benthos.dev/docs/components/processors/sql.")
if conf.SQL.Driver != "mysql" && conf.SQL.Driver != "postgres" && conf.SQL.Driver != "mssql" {
return nil, fmt.Errorf("driver '%v' is not supported with deprecated SQL features (using field 'dsn')", conf.SQL.Driver)
}
if s.resCodecDeprecated, err = strToSQLResultCodecDeprecated(conf.SQL.ResultCodec); err != nil {
return nil, err
}
} else if s.resCodec, err = strToSQLResultCodec(conf.SQL.ResultCodec); err != nil {
return nil, err
}
if s.db, err = sql.Open(conf.SQL.Driver, dsn); err != nil {
return nil, err
}
	if conf.SQL.UnsafeDynamicQuery {
		if deprecated {
			return nil, errors.New("cannot use dynamic queries when running in deprecated mode")
		}
		log.Warnln("using unsafe_dynamic_query leaves you vulnerable to SQL injection attacks")
if s.dynQuery, err = interop.NewBloblangField(mgr, s.queryStr); err != nil {
return nil, fmt.Errorf("failed to parse dynamic query expression: %v", err)
}
}
isSelectQuery := s.resCodecDeprecated != nil || s.resCodec != nil
// Some drivers only support transactional prepared inserts.
if s.dynQuery == nil && (isSelectQuery || !insertRequiresTransactionPrepare(conf.SQL.Driver)) {
if s.query, err = s.db.Prepare(s.queryStr); err != nil {
s.db.Close()
return nil, fmt.Errorf("failed to prepare query: %v", err)
}
}
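	// Lifecycle note: the goroutine below waits for a shutdown signal, then
	// closes the DB handle and any prepared statement under the write lock so
	// that in-flight ProcessMessage calls (which hold the read lock) finish first.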
go func() {
defer func() {
s.dbMux.Lock()
s.db.Close()
if s.query != nil {
s.query.Close()
}
s.dbMux.Unlock()
close(s.closedChan)
}()
<-s.closeChan
}()
return s, nil
}
//------------------------------------------------------------------------------
type sqlResultCodec func(rows *sql.Rows, part types.Part) error
func sqlResultJSONArrayCodec(rows *sql.Rows, part types.Part) error {
columnNames, err := rows.Columns()
if err != nil {
return err
}
jArray := []interface{}{}
for rows.Next() {
values := make([]interface{}, len(columnNames))
valuesWrapped := make([]interface{}, len(columnNames))
for i := range values {
valuesWrapped[i] = &values[i]
}
if err := rows.Scan(valuesWrapped...); err != nil {
return err
}
jObj := map[string]interface{}{}
for i, v := range values {
switch t := v.(type) {
case string:
jObj[columnNames[i]] = t
case []byte:
jObj[columnNames[i]] = string(t)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
jObj[columnNames[i]] = t
case float32, float64:
jObj[columnNames[i]] = t
case bool:
jObj[columnNames[i]] = t
default:
jObj[columnNames[i]] = t
}
}
jArray = append(jArray, jObj)
}
if err := rows.Err(); err != nil {
return err
}
return part.SetJSON(jArray)
}
func strToSQLResultCodec(codec string) (sqlResultCodec, error) {
switch codec {
case "json_array":
return sqlResultJSONArrayCodec, nil
case "none":
return nil, nil
}
return nil, fmt.Errorf("unrecognised result codec: %v", codec)
}
//------------------------------------------------------------------------------
func (s *SQL) doExecute(argSets [][]interface{}) (errs []error) {
var err error
defer func() {
if err != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
for i := range errs {
if errs[i] == nil {
errs[i] = err
}
}
}
}()
var tx *sql.Tx
if tx, err = s.db.Begin(); err != nil {
return
}
stmt := s.query
if stmt == nil {
if stmt, err = tx.Prepare(s.queryStr); err != nil {
return
}
defer stmt.Close()
} else {
stmt = tx.Stmt(stmt)
}
for i, args := range argSets {
if len(args) == 0 {
continue
}
if _, serr := stmt.Exec(args...); serr != nil {
if len(errs) == 0 {
errs = make([]error, len(argSets))
}
errs[i] = serr
}
}
err = tx.Commit()
return
}
func (s *SQL) getArgs(index int, msg types.Message) ([]interface{}, error) {
if len(s.args) > 0 {
args := make([]interface{}, len(s.args))
for i, v := range s.args {
args[i] = v.String(index, msg)
}
return args, nil
}
if s.argsMapping == nil {
return nil, nil
}
pargs, err := s.argsMapping.MapPart(index, msg)
if err != nil {
return nil, err
}
iargs, err := pargs.JSON()
if err != nil {
return nil, fmt.Errorf("mapping returned non-structured result: %w", err)
}
args, ok := iargs.([]interface{})
if !ok {
return nil, fmt.Errorf("mapping returned non-array result: %T", iargs)
}
return args, nil
}
// ProcessMessage applies the processor to a message, either creating >0 resulting messages or a response to be sent back to the message source.
func (s *SQL) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
s.dbMux.RLock()
defer s.dbMux.RUnlock()
if s.deprecated {
return s.processMessageDeprecated(msg)
}
s.mCount.Incr(1)
newMsg := msg.Copy()
if s.resCodec == nil && s.dynQuery == nil {
argSets := make([][]interface{}, newMsg.Len())
newMsg.Iter(func(index int, p types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
FlagErr(newMsg.Get(index), err)
return nil
}
argSets[index] = args
return nil
})
for i, err := range s.doExecute(argSets) {
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
FlagErr(newMsg.Get(i), err)
}
}
} else {
IteratePartsWithSpanV2(TypeSQL, nil, newMsg, func(index int, span *tracing.Span, part types.Part) error {
args, err := s.getArgs(index, msg)
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("Args mapping error: %v\n", err)
return err
}
if s.resCodec == nil {
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
_, err = s.db.Exec(queryStr, args...)
} else {
_, err = s.query.Exec(args...)
}
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
}
return nil
}
var rows *sql.Rows
if s.dynQuery != nil {
queryStr := s.dynQuery.String(index, msg)
rows, err = s.db.Query(queryStr, args...)
} else {
rows, err = s.query.Query(args...)
}
if err == nil {
defer rows.Close()
if err = s.resCodec(rows, part); err != nil {
err = fmt.Errorf("failed to apply result codec: %v", err)
}
} else {
err = fmt.Errorf("failed to execute query: %v", err)
}
if err != nil {
s.mErr.Incr(1)
s.log.Errorf("SQL error: %v\n", err)
return err
}
return nil
})
}
s.mBatchSent.Incr(1)
s.mSent.Incr(int64(newMsg.Len()))
msgs := [1]types.Message{newMsg}
return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
func (s *SQL) CloseAsync() {
s.closeOnce.Do(func() {
close(s.closeChan)
})
}
// WaitForClose blocks until the processor has closed down.
func (s *SQL) WaitForClose(timeout time.Duration) error {
select {
case <-time.After(timeout):
return types.ErrTimeout
case <-s.closedChan:
}
return nil
}
//------------------------------------------------------------------------------ | columns value in the row.`,
} | random_line_split |
test_transformer.py | import sys
sys.path.append("./")
from torchtext.datasets import Multi30k
from torchtext.data import Field
from torchtext import data
import pickle
import models.transformer as h
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from metrics.metrics import bleu
import numpy as np
from torch.autograd import Variable
from utils import plot_training_curve,plot_loss_curves
from torch import nn
import torch
import time
import matplotlib.pyplot as plt
import seaborn
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"Keep augmenting batch and calculate total number of tokens + padding."
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(vars(new)["src"]))
max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)["trg"]) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
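# Illustrative sketch (not part of the original script): a tiny, never-called
# helper showing the mask shapes Batch produces for a fake batch with pad index 0.
# The token values are made up for the example.
def _demo_batch_masks():
    toy_src = torch.tensor([[4, 5, 6, 0]])      # one source sentence, last token is padding
    toy_trg = torch.tensor([[2, 7, 8, 3, 0]])   # <sos> w1 w2 <eos> pad
    toy = Batch(toy_src, toy_trg, pad=0)
    # src_mask: (1, 1, 4), True where the source token is not padding.
    # trg_mask: (1, 4, 4), padding mask combined with the causal (subsequent) mask.
    # ntokens:  3, the number of non-pad target tokens to predict.
    return toy.src_mask.shape, toy.trg_mask.shape, int(toy.ntokens)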
class MyIterator(data.Iterator):
def create_batches(self):
if self.train:
def pool(d, random_shuffler):
for p in data.batch(d, self.batch_size * 100):
p_batch = data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def subsequent_mask(size):
|
def greedy_decode(model, src, src_mask, max_len, start_symbol):
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
for i in range(max_len-1):
out = model.decode(memory, src_mask,
Variable(ys),
Variable(subsequent_mask(ys.size(1))
.type_as(src.data)))
prob = model.generator(out[:, -1])
# vals, idxs = torch.topk(torch.softmax(prob, dim=1).flatten(), 10, largest=True)
# print((vals*100).tolist())
# print([TRG.vocab.itos[idx] for idx in idxs])
_, next_word = torch.max(prob, dim = 1)
next_word = next_word.data[0]
ys = torch.cat([ys,
torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)
return ys
def visualise_attention(tgt_sent, sent):
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Encoder Layer", layer+1)
for h in range(4):
vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()
draw(vals, sent, sent if h ==0 else [], ax=axs[h])
plt.show()
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Decoder Self Layer", layer+1)
for h in range(4):
vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()
draw(vals, tgt_sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
print("Decoder Src Layer", layer+1)
fig, axs = plt.subplots(1,4, figsize=(16, 5))
for h in range(4):
            vals = model.decoder.layers[layer].src_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()
draw(vals, sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
if self.opt is not None:
loss.backward()
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data.item() * norm
def rebatch(pad_idx, batch):
"Fix order in torchtext to match ours"
src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)
return Batch(src, trg, pad_idx)
def evaluate(data_iter, model, criterion):
model.eval()
with torch.no_grad():
eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter), model,
SimpleLossCompute(model.generator, criterion, opt=None))
return eval_loss
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = []
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens) #/ batch.ntokens
total_loss.append(loss.item())
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss, tokens / elapsed))
start = time.time()
tokens = 0
return total_loss
SRC = Field(tokenize = "spacy",
tokenizer_language="de_core_news_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en_core_web_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 100
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),fields = (SRC, TRG)
,filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
SRC.build_vocab(train_data.src, min_freq=2)
TRG.build_vocab(train_data.trg, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 64
train_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
test_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
model_name = "harvard_transformer2_state"
args = (INPUT_DIM, OUTPUT_DIM)
kwargs = {"N" : 6}
model = h.make_model(*args, **kwargs).to(device)
state = torch.load(model_name + ".pt", map_location=device)
model.load_state_dict(state["state_dict"])
losses = state["loss"]
pad_idx = TRG.vocab.stoi["<pad>"]
criterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)
test_losses = evaluate(test_iter, model, criterion_test)
losses["test"].append(test_losses)
test_loss = torch.tensor(sum(test_losses) / len(test_losses))
print(test_loss)
print('Perplexity:', torch.exp(test_loss))
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a man in a blue shirt is standing on a ladder and cleaning a window")
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a group of people stands in front of an igloo.")
sentence = [SRC.preprocess("ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .")]
real_translation = TRG.preprocess("a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .")
src = SRC.process(sentence).to(device).T
src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
model.eval()
out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
translation = ["<sos>"]
for i in range(1, out.size(1)):
sym = TRG.vocab.itos[out[0, i]]
translation.append(sym)
if sym == "<eos>":
break
print(' '.join(translation))
print(' '.join(real_translation))
# plot_loss_curves(losses["train"], losses["val"])
visualise_attention(translation, ["<sos>"] + sentence[0] + ["<eos>"])
# candidate = []
# reference = []
# for i, batch in enumerate(test_iter):
# src = batch.src.transpose(0, 1)[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# model.eval()
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# print("Translation: \t", ' '.join(translation))
# target = []
# for i in range(1, batch.trg.size(0)):
# sym = TRG.vocab.itos[batch.trg.data[i, 0]]
# if sym == "<eos>": break
# target.append(sym)
# print("Target: \t", ' '.join(target))
# print()
# candidate.append(translation)
# reference.append([target])
# score = bleu(candidate, reference)
# print(score)
# # state["bleu"] = bleu
# # save_model_state("harvard_transformer2_state.pt", model, {"args" : args, "kwargs" : kwargs}, epoch+1, state["loss"], state["bleu"])
# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']
# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)
# model.eval()
# candidate = []
# reference = []
# for val in trainloader:
# de=val['de']
# en=val['en']
# de_tokens = [SRC.preprocess(sentence) for sentence in de]
# en_tokens = [TRG.preprocess(sentence) for sentence in en]
# src = SRC.process(de_tokens).to(device).T[:1]
# trg = TRG.process(en_tokens).to(device).T[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# target = []
# for i in range(1, trg.size(1)):
# sym = TRG.vocab.itos[trg[0, i]]
# if sym == "<eos>": break
# target.append(sym)
# candidate.append(translation)
# reference.append([target])
# print(bleu(candidate, reference))
| "Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0 | identifier_body |
test_transformer.py | import sys
sys.path.append("./")
from torchtext.datasets import Multi30k
from torchtext.data import Field
from torchtext import data
import pickle
import models.transformer as h
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from metrics.metrics import bleu
import numpy as np
from torch.autograd import Variable
from utils import plot_training_curve,plot_loss_curves
from torch import nn
import torch
import time
import matplotlib.pyplot as plt
import seaborn
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"Keep augmenting batch and calculate total number of tokens + padding."
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(vars(new)["src"]))
max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)["trg"]) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
class MyIterator(data.Iterator):
def create_batches(self):
if self.train:
def pool(d, random_shuffler):
for p in data.batch(d, self.batch_size * 100):
p_batch = data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def greedy_decode(model, src, src_mask, max_len, start_symbol):
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
for i in range(max_len-1):
|
return ys
def visualise_attention(tgt_sent, sent):
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Encoder Layer", layer+1)
for h in range(4):
vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()
draw(vals, sent, sent if h ==0 else [], ax=axs[h])
plt.show()
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Decoder Self Layer", layer+1)
for h in range(4):
vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()
draw(vals, tgt_sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
print("Decoder Src Layer", layer+1)
fig, axs = plt.subplots(1,4, figsize=(16, 5))
for h in range(4):
            vals = model.decoder.layers[layer].src_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()
draw(vals, sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
if self.opt is not None:
loss.backward()
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data.item() * norm
def rebatch(pad_idx, batch):
"Fix order in torchtext to match ours"
src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)
return Batch(src, trg, pad_idx)
def evaluate(data_iter, model, criterion):
model.eval()
with torch.no_grad():
eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter), model,
SimpleLossCompute(model.generator, criterion, opt=None))
return eval_loss
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = []
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens) #/ batch.ntokens
total_loss.append(loss.item())
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss, tokens / elapsed))
start = time.time()
tokens = 0
return total_loss
SRC = Field(tokenize = "spacy",
tokenizer_language="de_core_news_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en_core_web_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 100
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),fields = (SRC, TRG)
,filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
SRC.build_vocab(train_data.src, min_freq=2)
TRG.build_vocab(train_data.trg, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 64
train_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
test_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
model_name = "harvard_transformer2_state"
args = (INPUT_DIM, OUTPUT_DIM)
kwargs = {"N" : 6}
model = h.make_model(*args, **kwargs).to(device)
state = torch.load(model_name + ".pt", map_location=device)
model.load_state_dict(state["state_dict"])
losses = state["loss"]
pad_idx = TRG.vocab.stoi["<pad>"]
criterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)
test_losses = evaluate(test_iter, model, criterion_test)
losses["test"].append(test_losses)
test_loss = torch.tensor(sum(test_losses) / len(test_losses))
print(test_loss)
print('Perplexity:', torch.exp(test_loss))
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a man in a blue shirt is standing on a ladder and cleaning a window")
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a group of people stands in front of an igloo.")
sentence = [SRC.preprocess("ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .")]
real_translation = TRG.preprocess("a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .")
src = SRC.process(sentence).to(device).T
src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
model.eval()
out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
translation = ["<sos>"]
for i in range(1, out.size(1)):
sym = TRG.vocab.itos[out[0, i]]
translation.append(sym)
if sym == "<eos>":
break
print(' '.join(translation))
print(' '.join(real_translation))
# plot_loss_curves(losses["train"], losses["val"])
visualise_attention(translation, ["<sos>"] + sentence[0] + ["<eos>"])
# candidate = []
# reference = []
# for i, batch in enumerate(test_iter):
# src = batch.src.transpose(0, 1)[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# model.eval()
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# print("Translation: \t", ' '.join(translation))
# target = []
# for i in range(1, batch.trg.size(0)):
# sym = TRG.vocab.itos[batch.trg.data[i, 0]]
# if sym == "<eos>": break
# target.append(sym)
# print("Target: \t", ' '.join(target))
# print()
# candidate.append(translation)
# reference.append([target])
# score = bleu(candidate, reference)
# print(score)
# # state["bleu"] = bleu
# # save_model_state("harvard_transformer2_state.pt", model, {"args" : args, "kwargs" : kwargs}, epoch+1, state["loss"], state["bleu"])
# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']
# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)
# model.eval()
# candidate = []
# reference = []
# for val in trainloader:
# de=val['de']
# en=val['en']
# de_tokens = [SRC.preprocess(sentence) for sentence in de]
# en_tokens = [TRG.preprocess(sentence) for sentence in en]
# src = SRC.process(de_tokens).to(device).T[:1]
# trg = TRG.process(en_tokens).to(device).T[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# target = []
# for i in range(1, trg.size(1)):
# sym = TRG.vocab.itos[trg[0, i]]
# if sym == "<eos>": break
# target.append(sym)
# candidate.append(translation)
# reference.append([target])
# print(bleu(candidate, reference))
| out = model.decode(memory, src_mask,
Variable(ys),
Variable(subsequent_mask(ys.size(1))
.type_as(src.data)))
prob = model.generator(out[:, -1])
# vals, idxs = torch.topk(torch.softmax(prob, dim=1).flatten(), 10, largest=True)
# print((vals*100).tolist())
# print([TRG.vocab.itos[idx] for idx in idxs])
_, next_word = torch.max(prob, dim = 1)
next_word = next_word.data[0]
ys = torch.cat([ys,
torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1) | conditional_block |
test_transformer.py | import sys
sys.path.append("./")
from torchtext.datasets import Multi30k
from torchtext.data import Field
from torchtext import data
import pickle
import models.transformer as h
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from metrics.metrics import bleu
import numpy as np
from torch.autograd import Variable
from utils import plot_training_curve,plot_loss_curves
from torch import nn
import torch
import time
import matplotlib.pyplot as plt
import seaborn
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"Keep augmenting batch and calculate total number of tokens + padding."
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(vars(new)["src"]))
max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)["trg"]) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
class Batch:
"Object for holding a batch of data with mask during training."
def | (self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
class MyIterator(data.Iterator):
def create_batches(self):
if self.train:
def pool(d, random_shuffler):
for p in data.batch(d, self.batch_size * 100):
p_batch = data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def greedy_decode(model, src, src_mask, max_len, start_symbol):
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
for i in range(max_len-1):
out = model.decode(memory, src_mask,
Variable(ys),
Variable(subsequent_mask(ys.size(1))
.type_as(src.data)))
prob = model.generator(out[:, -1])
# vals, idxs = torch.topk(torch.softmax(prob, dim=1).flatten(), 10, largest=True)
# print((vals*100).tolist())
# print([TRG.vocab.itos[idx] for idx in idxs])
_, next_word = torch.max(prob, dim = 1)
next_word = next_word.data[0]
ys = torch.cat([ys,
torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)
return ys
def visualise_attention(tgt_sent, sent):
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Encoder Layer", layer+1)
for h in range(4):
vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()
draw(vals, sent, sent if h ==0 else [], ax=axs[h])
plt.show()
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Decoder Self Layer", layer+1)
for h in range(4):
vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()
draw(vals, tgt_sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
print("Decoder Src Layer", layer+1)
fig, axs = plt.subplots(1,4, figsize=(16, 5))
for h in range(4):
            vals = model.decoder.layers[layer].src_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()
draw(vals, sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
if self.opt is not None:
loss.backward()
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data.item() * norm
def rebatch(pad_idx, batch):
"Fix order in torchtext to match ours"
src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)
return Batch(src, trg, pad_idx)
def evaluate(data_iter, model, criterion):
model.eval()
with torch.no_grad():
eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter), model,
SimpleLossCompute(model.generator, criterion, opt=None))
return eval_loss
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = []
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens) #/ batch.ntokens
total_loss.append(loss.item())
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss, tokens / elapsed))
start = time.time()
tokens = 0
return total_loss
SRC = Field(tokenize = "spacy",
tokenizer_language="de_core_news_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en_core_web_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 100
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),fields = (SRC, TRG)
,filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
SRC.build_vocab(train_data.src, min_freq=2)
TRG.build_vocab(train_data.trg, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 64
train_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
test_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
model_name = "harvard_transformer2_state"
args = (INPUT_DIM, OUTPUT_DIM)
kwargs = {"N" : 6}
model = h.make_model(*args, **kwargs).to(device)
state = torch.load(model_name + ".pt", map_location=device)
model.load_state_dict(state["state_dict"])
losses = state["loss"]
pad_idx = TRG.vocab.stoi["<pad>"]
criterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)
test_losses = evaluate(test_iter, model, criterion_test)
losses["test"].append(test_losses)
test_loss = torch.tensor(sum(test_losses) / len(test_losses))
print(test_loss)
print('Perplexity:', torch.exp(test_loss))
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a man in a blue shirt is standing on a ladder and cleaning a window")
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a group of people stands in front of an igloo.")
sentence = [SRC.preprocess("ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .")]
real_translation = TRG.preprocess("a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .")
src = SRC.process(sentence).to(device).T
src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
model.eval()
out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
translation = ["<sos>"]
for i in range(1, out.size(1)):
sym = TRG.vocab.itos[out[0, i]]
translation.append(sym)
if sym == "<eos>":
break
print(' '.join(translation))
print(' '.join(real_translation))
# plot_loss_curves(losses["train"], losses["val"])
visualise_attention(translation, ["<sos>"] + sentence[0] + ["<eos>"])
# candidate = []
# reference = []
# for i, batch in enumerate(test_iter):
# src = batch.src.transpose(0, 1)[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# model.eval()
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# print("Translation: \t", ' '.join(translation))
# target = []
# for i in range(1, batch.trg.size(0)):
# sym = TRG.vocab.itos[batch.trg.data[i, 0]]
# if sym == "<eos>": break
# target.append(sym)
# print("Target: \t", ' '.join(target))
# print()
# candidate.append(translation)
# reference.append([target])
# score = bleu(candidate, reference)
# print(score)
# # state["bleu"] = bleu
# # save_model_state("harvard_transformer2_state.pt", model, {"args" : args, "kwargs" : kwargs}, epoch+1, state["loss"], state["bleu"])
# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']
# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)
# model.eval()
# candidate = []
# reference = []
# for val in trainloader:
# de=val['de']
# en=val['en']
# de_tokens = [SRC.preprocess(sentence) for sentence in de]
# en_tokens = [TRG.preprocess(sentence) for sentence in en]
# src = SRC.process(de_tokens).to(device).T[:1]
# trg = TRG.process(en_tokens).to(device).T[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# target = []
# for i in range(1, trg.size(1)):
# sym = TRG.vocab.itos[trg[0, i]]
# if sym == "<eos>": break
# target.append(sym)
# candidate.append(translation)
# reference.append([target])
# print(bleu(candidate, reference))
| __init__ | identifier_name |
test_transformer.py | import sys
sys.path.append("./")
from torchtext.datasets import Multi30k
from torchtext.data import Field
from torchtext import data
import pickle
import models.transformer as h
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from metrics.metrics import bleu
import numpy as np
from torch.autograd import Variable
from utils import plot_training_curve,plot_loss_curves
from torch import nn
import torch
import time
import matplotlib.pyplot as plt
import seaborn
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"Keep augmenting batch and calculate total number of tokens + padding."
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(vars(new)["src"]))
max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)["trg"]) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
class MyIterator(data.Iterator):
def create_batches(self):
if self.train:
def pool(d, random_shuffler):
for p in data.batch(d, self.batch_size * 100):
p_batch = data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def greedy_decode(model, src, src_mask, max_len, start_symbol):
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
for i in range(max_len-1):
out = model.decode(memory, src_mask,
Variable(ys),
Variable(subsequent_mask(ys.size(1))
.type_as(src.data)))
prob = model.generator(out[:, -1])
# vals, idxs = torch.topk(torch.softmax(prob, dim=1).flatten(), 10, largest=True)
# print((vals*100).tolist())
# print([TRG.vocab.itos[idx] for idx in idxs])
_, next_word = torch.max(prob, dim = 1)
next_word = next_word.data[0]
ys = torch.cat([ys,
torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)
return ys
def visualise_attention(tgt_sent, sent):
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Encoder Layer", layer+1)
for h in range(4):
vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()
draw(vals, sent, sent if h ==0 else [], ax=axs[h])
plt.show()
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Decoder Self Layer", layer+1)
for h in range(4):
vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()
draw(vals, tgt_sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
print("Decoder Src Layer", layer+1)
fig, axs = plt.subplots(1,4, figsize=(16, 5))
for h in range(4):
            vals = model.decoder.layers[layer].src_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()
draw(vals, sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
if self.opt is not None:
loss.backward()
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data.item() * norm
def rebatch(pad_idx, batch): | "Fix order in torchtext to match ours"
src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)
return Batch(src, trg, pad_idx)
def evaluate(data_iter, model, criterion):
model.eval()
with torch.no_grad():
eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter), model,
SimpleLossCompute(model.generator, criterion, opt=None))
return eval_loss
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = []
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens) #/ batch.ntokens
total_loss.append(loss.item())
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss, tokens / elapsed))
start = time.time()
tokens = 0
return total_loss
SRC = Field(tokenize = "spacy",
tokenizer_language="de_core_news_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en_core_web_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 100
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),fields = (SRC, TRG)
,filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
SRC.build_vocab(train_data.src, min_freq=2)
TRG.build_vocab(train_data.trg, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 64
train_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
test_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
model_name = "harvard_transformer2_state"
args = (INPUT_DIM, OUTPUT_DIM)
kwargs = {"N" : 6}
model = h.make_model(*args, **kwargs).to(device)
state = torch.load(model_name + ".pt", map_location=device)
model.load_state_dict(state["state_dict"])
losses = state["loss"]
pad_idx = TRG.vocab.stoi["<pad>"]
criterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)
test_losses = evaluate(test_iter, model, criterion_test)
losses["test"].append(test_losses)
test_loss = torch.tensor(sum(test_losses) / len(test_losses))
print(test_loss)
print('Perplexity:', torch.exp(test_loss))
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a man in a blue shirt is standing on a ladder and cleaning a window")
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a group of people stands in front of an igloo.")
sentence = [SRC.preprocess("ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .")]
real_translation = TRG.preprocess("a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .")
src = SRC.process(sentence).to(device).T
src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
model.eval()
out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
translation = ["<sos>"]
for i in range(1, out.size(1)):
sym = TRG.vocab.itos[out[0, i]]
translation.append(sym)
if sym == "<eos>":
break
print(' '.join(translation))
print(' '.join(real_translation))
# plot_loss_curves(losses["train"], losses["val"])
visualise_attention(translation, ["<sos>"] + sentence[0] + ["<eos>"])
# candidate = []
# reference = []
# for i, batch in enumerate(test_iter):
# src = batch.src.transpose(0, 1)[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# model.eval()
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# print("Translation: \t", ' '.join(translation))
# target = []
# for i in range(1, batch.trg.size(0)):
# sym = TRG.vocab.itos[batch.trg.data[i, 0]]
# if sym == "<eos>": break
# target.append(sym)
# print("Target: \t", ' '.join(target))
# print()
# candidate.append(translation)
# reference.append([target])
# score = bleu(candidate, reference)
# print(score)
# # state["bleu"] = bleu
# # save_model_state("harvard_transformer2_state.pt", model, {"args" : args, "kwargs" : kwargs}, epoch+1, state["loss"], state["bleu"])
# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']
# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)
# model.eval()
# candidate = []
# reference = []
# for val in trainloader:
# de=val['de']
# en=val['en']
# de_tokens = [SRC.preprocess(sentence) for sentence in de]
# en_tokens = [TRG.preprocess(sentence) for sentence in en]
# src = SRC.process(de_tokens).to(device).T[:1]
# trg = TRG.process(en_tokens).to(device).T[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# target = []
# for i in range(1, trg.size(1)):
# sym = TRG.vocab.itos[trg[0, i]]
# if sym == "<eos>": break
# target.append(sym)
# candidate.append(translation)
# reference.append([target])
# print(bleu(candidate, reference)) | random_line_split |
|
RBF_full_NEW.py | import numpy as np
import matplotlib.pyplot as plt
from functions import comb_dataset, read_csv_fast
import os
import pandas as pd
def | (NUM_POINTS):
csv_data = read_csv_fast(os.path.dirname(os.path.realpath(__file__))+'/trajectories/right_100.csv')
timestamps = csv_data[:,0]
duration = timestamps[-1] - timestamps[0]
interpolated_duration_list = [0]
for i in range(NUM_POINTS-2):
interpolated_duration_list.append(np.nan)
interpolated_duration_list.append(duration)
series = pd.Series(interpolated_duration_list)
result = series.interpolate()
return np.array(result)
def normalize_time(full_timestamps, half_timestamp):
"""
Computes phase from given timestamps. Phase is normalized time from 0 to 1.
"""
phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
return phases
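# Illustrative sketch (not part of the original file, never called): normalize_time
# maps observed timestamps onto [0, 1] relative to the full duration, so observing
# the first half of a trajectory yields phases in [0, 0.5].
def _demo_normalize_time():
    full = np.linspace(0.0, 2.0, 5)      # full trajectory sampled over 2 seconds
    half = full[:3]                      # only the first half has been observed
    return normalize_time(full, half)    # -> array([0.  , 0.25, 0.5 ])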
def learn_weights(norm_data, PSIs_matrix, LAMBDA_COEFF=1e-12):
"""
    :param norm_data: predefined trajectories -> data to learn weights
:param PSIs_matrix: matrix of basis kernel functions (taken from compute_feature_matrix)
:return: learned weights
"""
# Find out the number of basis functions
N = PSIs_matrix.shape[1]
    # Find out the dimensionality of trajectories (x and y)
dof = norm_data[0].shape[1]
# There is a matrix of zeros (#Dofs x #basis functions) of weights for each of the demonstrations.
weights = np.zeros((norm_data.shape[0], dof, N))
# fill weights matrix
for index in range(norm_data.shape[0]):
for i in range(dof):
# In case some regularization is necessary
# weights[index][i] = np.dot(np.linalg.inv(np.dot(PSIs_matrix, PSIs_matrix.T) + 10e-12 * np.eye(np.dot(PSIs_matrix, PSIs_matrix.T).shape[0])), np.dot(PSIs_matrix, norm_data[index][:,i]))
# weights[index][i] = np.dot(np.linalg.pinv(
# np.dot(PSIs_matrix, PSIs_matrix.T) + LAMBDA_COEFF * np.identity(PSIs_matrix.shape[0])),
# np.dot(PSIs_matrix, norm_data[index][:, i]))
A = np.dot(PSIs_matrix.T, PSIs_matrix) + LAMBDA_COEFF * np.identity(N)
B = np.dot(PSIs_matrix.T, norm_data[index][:, i])
weights[index,i,:] = np.linalg.solve(A, B)
return weights
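# Illustrative sketch (assumption, not original code): for one demonstration and one
# degree of freedom the fit above is a ridge regression,
#   w = (Phi^T Phi + lambda * I)^{-1} Phi^T y,
# solved with np.linalg.solve instead of forming the inverse explicitly. The helper
# below mirrors that computation and is never called in this script.
def _fit_single_dof(Phi, y, lambda_coeff=1e-12):
    A = Phi.T.dot(Phi) + lambda_coeff * np.identity(Phi.shape[1])
    b = Phi.T.dot(y)
    return np.linalg.solve(A, b)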
def compute_feature_matrix(phases, N, h):
"""
Compute a TxN matrix of features of the given phase vector using Gaussian basis functions.
Where T is the number of elements in the phase vector and N is the number of basis functions.
Parameters
----------
numpy.ndarray
phases: vector with phases
int
N: number of basis functions in the resulting matrix
float
h: width of a basis function (variance)
Returns
-------
numpy.ndarray
TxN matrix of Gaussian basis functions
"""
T = len(phases)
# Uniformly distribute the centers of N basis functions in domain[-2h,2h+1].
centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
# compute a TxN matrix with centers
C = np.repeat(centers.reshape(1, N), T, axis=0)
# compute a TxN matrix with phases
P = np.repeat(phases.reshape(T, 1), N, axis=1)
# compute a TxN feature matrix
Phi = np.exp(- 0.5 / h * np.square(P - C))
# normalize the feature matrix
Phi = Phi / np.sum(Phi, axis=1).reshape(T, 1)
return Phi
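# Illustrative sketch (not part of the original file, never called): each row of the
# feature matrix holds N normalized Gaussian activations, so every row sums to one.
def _demo_feature_matrix(N=8, h=0.1, T=5):
    Phi = compute_feature_matrix(np.linspace(0.0, 1.0, T), N, h)
    return Phi.shape, np.allclose(Phi.sum(axis=1), 1.0)   # ((5, 8), True)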
####################################################
################ Call For Functions ################
####################################################
# parameters
N = 8 # Number of basis functions
h = 0.1 #1.0 / (N * N)
ridge_factor = 1e-12
time_steps = 100
dataset = comb_dataset(time_steps)[:3] # get dataset
num_right_traj = len(dataset[0]) # number of right trajectories in dataset
M = 2 # dimensionality
###### Prepare Matrix for Basis Functions ######
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
percentageFULL = 100
part_timestamps = (len(interpolated_timestamps) * percentageFULL) // 100  # integer division so the result can be used as a slice index
interpolated_timestamps_full = interpolated_timestamps[0:part_timestamps]
phase_full = normalize_time(interpolated_timestamps, interpolated_timestamps_full)
# z = np.linspace(0, 1, 100) # starting at 0, ending at 1
# a= z[:,None]
phases_full = phase_full[:, None]
psi = compute_feature_matrix(phases_full, N, h) # shape (time_steps, N) = (100, 8)
###### Calculate WEIGHTS ######
weights_right = learn_weights(dataset[0], psi)
###### Calculate MEAN of weights ######
weight_mean_right_x, weight_mean_right_y = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
weights_mean_right_x, weights_mean_right_y = weight_mean_right_x[:,None], weight_mean_right_y[:,None] # shape (N, 1) = (8, 1)
combined_weights_mean_xy = [weights_mean_right_x[:, 0], weights_mean_right_y[:, 0]]
###### Reconstructed Trajectory ######
x_weights_right, y_weights_right = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
reconstr_traj_mean_right_x, reconstr_traj_mean_right_y = np.dot(psi, x_weights_right[:,None]).reshape([time_steps]), np.dot(psi, y_weights_right[:,None]).reshape([time_steps])
###### Calculate COVARIANCE of weights ######
weights_cov_right_x = np.cov(weights_right[:,0].T) # shape (N, N) = (8, 8)
weights_cov_right_y = np.cov(weights_right[:,1].T)
combined_cov_right_xy = [weights_cov_right_x, weights_cov_right_y]
###### bound calculation for mean ######
traj_cov_x_diag = np.sum(psi.dot(weights_cov_right_x) * psi, axis=1)
std_x = np.sqrt(traj_cov_x_diag)
bound_upp_x = reconstr_traj_mean_right_x + 2 * std_x
bound_bottom_x = reconstr_traj_mean_right_x - 2 * std_x
traj_cov_y_diag = np.sum(psi.dot(weights_cov_right_y) * psi, axis=1)
std_y = np.sqrt(traj_cov_y_diag)
bound_upp_y = reconstr_traj_mean_right_y + 2 * std_y
bound_bottom_y = reconstr_traj_mean_right_y - 2 * std_y
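# Note (added for clarity): the bounds above come from propagating the weight
# covariance through the basis functions. For time step t with feature vector
# phi_t (one row of psi), the trajectory variance is var[tau_t] = phi_t^T Sigma_w phi_t,
# which is what np.sum(psi.dot(cov) * psi, axis=1) computes row-wise; the
# fill_between call below then plots the mean +/- 2 standard deviations.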
fig, ax = plt.subplots()
plt.figure(1)
plt.plot(reconstr_traj_mean_right_x, 'black')
plt.fill_between(np.arange(time_steps), bound_upp_x, bound_bottom_x, alpha = 0.5, color = 'red', linewidth = 1)
plt.show()
###########################################################
###### Calculate of NEW MEAN & COVARIANCE | 3rd step ######
###########################################################
test_traj_right = np.mean(dataset[0], axis=0) # mean trajectory from RIGHT DB
percentage = 10
part = (len(test_traj_right) * percentage) // 100  # integer division for use as a slice index
test_traj_right_NOTfull = test_traj_right[0:part]
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
part_timestamps = (len(interpolated_timestamps) * percentage) // 100
interpolated_timestamps_NOTfull = interpolated_timestamps[0:part_timestamps]
# print interpolated_timestamps_NOTfull
phase_NOTfull = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# print phase_NOTfull
# exit(1)
# phases = []
# for t in range(len(interpolated_timestamps_NOTfull)):
# phase = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# phases.append(phase)
#
# print phases
# # print phase
# # exit(1)
# feature matrix for current trajectory
N1 = 8
h1 = 0.1
obs_variance = 0.0005  # assumed observation noise variance for conditioning (value chosen ad hoc)
z1 = np.linspace(0, 1, len(test_traj_right_NOTfull)) # starting at 0, ending at 1
psi_new = compute_feature_matrix(phase_NOTfull, N1, h1) # shape (observed steps, N1) = (10, 8)
# print "shape: ", psi_new.shape
psi_mean = []
for a in range(len(psi_new)):
psi_mean.append(np.mean(psi_new[a]))
# compute w_mean and w_cov separately for each dimension
num_dimensions = 2
w_mean_new = [np.empty([N1]) for i in range(num_dimensions)]
w_cov_new = [np.empty([N1, N1]) for i in range(num_dimensions)]
for i in range(num_dimensions): # for both dimensions (x and y)
C = np.dot(combined_cov_right_xy[i], psi_new.T) # shape (8,10)
D = np.diag(np.sum(C * psi_new.T, axis=0) + obs_variance) # shape (10, 10)
b = test_traj_right_NOTfull[:, i] - np.dot(psi_new, combined_weights_mean_xy[i]) # shape 10
    x = np.linalg.solve(D, b) # shape (10,)
Lb = np.dot(C, x) # shape 8
w_mean_new[ i] = combined_weights_mean_xy[i] + Lb
y = np.linalg.solve(D, C.T) # shape (10, 8)
La = np.dot(C, y) # shape (8, 8)
w_cov_new[i] = np.array(combined_cov_right_xy[i]) - La
# print "COV1: ", combined_cov_right_xy[i].shape
# print "C: ", C.shape
# print "D : ", D.shape
# print "meanALL: ", combined_weights_mean_xy[i].shape
# print "b: ", b.shape
# print "x: ", x.shape
# print "y: ", y.shape
# print "Lb: ", Lb.shape
# print "La: ", La.shape
###### Reconstructed NOT FULL Trajectory ######
reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new = np.dot(psi_new, w_mean_new[0]), np.dot(psi_new, w_mean_new[1])
###### Reconstructed PREDICTED FULL Trajectory ######
reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED = np.dot(psi, w_mean_new[0]), np.dot(psi, w_mean_new[1])
# print len(reconstr_traj_mean_right_x), len(reconstr_traj_mean_right_y)
# print len(reconstr_traj_mean_right_x_new), len(reconstr_traj_mean_right_y_new)
# print "full:", test_traj_right
# print "NOT_full:", test_traj_right_NOTfull
# print w_mean_new, len( w_mean_new)
############################################
################# PLOTTING #################
############################################
plt.title('')
plt.xlabel('x, [m]')
plt.ylabel('y, [m]')
labels = {
'real': 'Mean Trajectory from Demonstrations',
'reconstructed': 'Reconstructed trajectory, using mean of weights',
'reconstructedNOTFULL': 'Reconstructed Not Full trajectory',
'reconstructedFULL': 'Reconstructed Predicted Full trajectory'
}
plt.plot()
plt.plot(test_traj_right[:, 0], test_traj_right[:, 1], 'blue', label=labels['real'])
plt.plot(reconstr_traj_mean_right_x, reconstr_traj_mean_right_y, 'red', label=labels['reconstructed'])
plt.plot(reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new, '--go', label=labels['reconstructedNOTFULL'])
plt.plot(reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED, 'black', label=labels['reconstructedFULL'])
plt.legend(loc='lower right')
plt.show() | interpolate_timestamps | identifier_name |
RBF_full_NEW.py | import numpy as np
import matplotlib.pyplot as plt
from functions import comb_dataset, read_csv_fast
import os
import pandas as pd
def interpolate_timestamps(NUM_POINTS):
csv_data = read_csv_fast(os.path.dirname(os.path.realpath(__file__))+'/trajectories/right_100.csv')
timestamps = csv_data[:,0]
duration = timestamps[-1] - timestamps[0]
interpolated_duration_list = [0]
for i in range(NUM_POINTS-2):
interpolated_duration_list.append(np.nan)
interpolated_duration_list.append(duration)
series = pd.Series(interpolated_duration_list)
result = series.interpolate()
return np.array(result)
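# Editor's note (added): the list built above is [0, NaN, ..., NaN, duration];
# pandas Series.interpolate() fills the NaNs linearly, so the function returns
# NUM_POINTS timestamps spread evenly between 0 and the recorded duration.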
def normalize_time(full_timestamps, half_timestamp):
|
def learn_weights(norm_data, PSIs_matrix, LAMBDA_COEFF=1e-12):
"""
:param norm_data: predefined trajectories -> data to learn weights
:param PSIs_matrix: matrix of basis kernel functions (taken from compute_feature_matrix)
:return: learned weights
"""
# Find out the number of basis functions
N = PSIs_matrix.shape[1]
# Find out the dimensionality of trajectories (x and y)
dof = norm_data[0].shape[1]
# There is a matrix of zeros (#Dofs x #basis functions) of weights for each of the demonstrations.
weights = np.zeros((norm_data.shape[0], dof, N))
# fill weights matrix
for index in range(norm_data.shape[0]):
for i in range(dof):
# In case some regularization is necessary
# weights[index][i] = np.dot(np.linalg.inv(np.dot(PSIs_matrix, PSIs_matrix.T) + 10e-12 * np.eye(np.dot(PSIs_matrix, PSIs_matrix.T).shape[0])), np.dot(PSIs_matrix, norm_data[index][:,i]))
# weights[index][i] = np.dot(np.linalg.pinv(
# np.dot(PSIs_matrix, PSIs_matrix.T) + LAMBDA_COEFF * np.identity(PSIs_matrix.shape[0])),
# np.dot(PSIs_matrix, norm_data[index][:, i]))
A = np.dot(PSIs_matrix.T, PSIs_matrix) + LAMBDA_COEFF * np.identity(N)
B = np.dot(PSIs_matrix.T, norm_data[index][:, i])
weights[index,i,:] = np.linalg.solve(A, B)
return weights
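# Editor's note (added): for every demonstration and every DoF the loop above solves the
# ridge-regularised least-squares problem
#   w = (Phi^T Phi + lambda I)^-1 Phi^T y
# via np.linalg.solve on the normal equations instead of forming an explicit inverse.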
def compute_feature_matrix(phases, N, h):
"""
Compute a TxN matrix of features of the given phase vector using Gaussian basis functions.
Where T is the number of elements in the phase vector and N is the number of basis functions.
Parameters
----------
numpy.ndarray
phases: vector with phases
int
N: number of basis functions in the resulting matrix
float
h: width of a basis function (variance)
Returns
-------
numpy.ndarray
TxN matrix of Gaussian basis functions
"""
T = len(phases)
# Uniformly distribute the centers of N basis functions in domain[-2h,2h+1].
centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
# compute a TxN matrix with centers
C = np.repeat(centers.reshape(1, N), T, axis=0)
# compute a TxN matrix with phases
P = np.repeat(phases.reshape(T, 1), N, axis=1)
# compute a TxN feature matrix
Phi = np.exp(- 0.5 / h * np.square(P - C))
# normalize the feature matrix
Phi = Phi / np.sum(Phi, axis=1).reshape(T, 1)
return Phi
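# --- Editor's illustrative sketch (added; names prefixed with "_" are new and safe to remove) ---
# Quick self-check of the basis functions defined above: every row of the returned
# feature matrix is normalised to sum to one, whatever N and h are.
_phases_demo = np.linspace(0, 1, 5)
_Phi_demo = compute_feature_matrix(_phases_demo, 8, 0.1)  # shape (5, 8)
assert _Phi_demo.shape == (5, 8)
assert np.allclose(_Phi_demo.sum(axis=1), 1.0)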
####################################################
################ Call For Functions ################
####################################################
# parameters
N = 8 # Number of basis functions
h = 0.1 #1.0 / (N * N)
ridge_factor = 1e-12
time_steps = 100
dataset = comb_dataset(time_steps)[:3] # get dataset
num_right_traj = len(dataset[0]) # number of right trajectories in dataset
M = 2 # dimensionality
###### Prepare Matrix for Basis Functions ######
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
percentageFULL = 100
part_timestamps = (len(interpolated_timestamps) * (percentageFULL) / 100)
interpolated_timestamps_full = interpolated_timestamps[0:part_timestamps]
phase_full = normalize_time(interpolated_timestamps, interpolated_timestamps_full)
# z = np.linspace(0, 1, 100) # starting at 0, ending at 1
# a= z[:,None]
phases_full = phase_full[:, None]
psi = compute_feature_matrix(phases_full, N, h) # shape (100, 8)
###### Calculate WEIGHTS ######
weights_right = learn_weights(dataset[0], psi)
###### Calculate MEAN of weights ######
weight_mean_right_x, weight_mean_right_y = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
weights_mean_right_x, weights_mean_right_y = weight_mean_right_x[:,None], weight_mean_right_y[:,None] # shape (8, 1)
combined_weights_mean_xy = [weights_mean_right_x[:, 0], weights_mean_right_y[:, 0]]
###### Reconstructed Trajectory ######
x_weights_right, y_weights_right = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
reconstr_traj_mean_right_x, reconstr_traj_mean_right_y = np.dot(psi, x_weights_right[:,None]).reshape([time_steps]), np.dot(psi, y_weights_right[:,None]).reshape([time_steps])
###### Calculate COVARIANCE of weights ######
weights_cov_right_x = np.cov(weights_right[:,0].T) # shape (8, 8)
weights_cov_right_y = np.cov(weights_right[:,1].T)
combined_cov_right_xy = [weights_cov_right_x, weights_cov_right_y]
###### bound calculation for mean ######
traj_cov_x_diag = np.sum(psi.dot(weights_cov_right_x) * psi, axis=1)
std_x = np.sqrt(traj_cov_x_diag)
bound_upp_x = reconstr_traj_mean_right_x + 2 * std_x
bound_bottom_x = reconstr_traj_mean_right_x - 2 * std_x
traj_cov_y_diag = np.sum(psi.dot(weights_cov_right_y) * psi, axis=1)
std_y = np.sqrt(traj_cov_y_diag)
bound_upp_y = reconstr_traj_mean_right_y + 2 * std_y
bound_bottom_y = reconstr_traj_mean_right_y - 2 * std_y
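# Editor's note (added): np.sum(psi.dot(cov) * psi, axis=1) computes just the diagonal of
# psi @ cov @ psi.T, i.e. the per-time-step variance of the reconstructed trajectory, so the
# plotted band is the mean +/- 2 standard deviations (a ~95% envelope under a Gaussian assumption).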
fig, ax = plt.subplots()
plt.figure(1)
plt.plot(reconstr_traj_mean_right_x, 'black')
plt.fill_between(np.arange(time_steps), bound_upp_x, bound_bottom_x, alpha = 0.5, color = 'red', linewidth = 1)
plt.show()
###########################################################
###### Calculation of NEW MEAN & COVARIANCE | 3rd step ######
###########################################################
test_traj_right = np.mean(dataset[0], axis=0) # mean trajectory from RIGHT DB
percentage = 10
part = (len(test_traj_right) * (percentage) / 100)
test_traj_right_NOTfull = test_traj_right[0:part]
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
part_timestamps = (len(interpolated_timestamps) * (percentage) / 100)
interpolated_timestamps_NOTfull = interpolated_timestamps[0:part_timestamps]
# print interpolated_timestamps_NOTfull
phase_NOTfull = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# print phase_NOTfull
# exit(1)
# phases = []
# for t in range(len(interpolated_timestamps_NOTfull)):
# phase = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# phases.append(phase)
#
# print phases
# # print phase
# # exit(1)
# feature matrix for current trajectory
N1 = 8
h1 = 0.1
obs_variance = 0.0005 #????
z1 = np.linspace(0, 1, len(test_traj_right_NOTfull)) # starting at 0, ending at 1
psi_new = compute_feature_matrix(phase_NOTfull, N1, h1) # shape (10, 8)
# print "shape: ", psi_new.shape
psi_mean = []
for a in range(len(psi_new)):
psi_mean.append(np.mean(psi_new[a]))
# compute w_mean and w_cov separately for each dimension
num_dimensions = 2
w_mean_new = [np.empty([N1]) for i in range(num_dimensions)]
w_cov_new = [np.empty([N1, N1]) for i in range(num_dimensions)]
for i in range(num_dimensions): # for BOTH DIMENSIONS
C = np.dot(combined_cov_right_xy[i], psi_new.T) # shape (8,10)
D = np.diag(np.sum(C * psi_new.T, axis=0) + obs_variance) # shape (10, 10)
b = test_traj_right_NOTfull[:, i] - np.dot(psi_new, combined_weights_mean_xy[i]) # shape 10
x = np.linalg.solve(D, b) # shape (10,)
Lb = np.dot(C, x) # shape 8
w_mean_new[i] = combined_weights_mean_xy[i] + Lb
y = np.linalg.solve(D, C.T) # shape (10, 8)
La = np.dot(C, y) # shape (8, 8)
w_cov_new[i] = np.array(combined_cov_right_xy[i]) - La
# print "COV1: ", combined_cov_right_xy[i].shape
# print "C: ", C.shape
# print "D : ", D.shape
# print "meanALL: ", combined_weights_mean_xy[i].shape
# print "b: ", b.shape
# print "x: ", x.shape
# print "y: ", y.shape
# print "Lb: ", Lb.shape
# print "La: ", La.shape
###### Reconstructed NOT FULL Trajectory ######
reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new = np.dot(psi_new, w_mean_new[0]), np.dot(psi_new, w_mean_new[1])
###### Reconstructed PREDICTED FULL Trajectory ######
reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED = np.dot(psi, w_mean_new[0]), np.dot(psi, w_mean_new[1])
# print len(reconstr_traj_mean_right_x), len(reconstr_traj_mean_right_y)
# print len(reconstr_traj_mean_right_x_new), len(reconstr_traj_mean_right_y_new)
# print "full:", test_traj_right
# print "NOT_full:", test_traj_right_NOTfull
# print w_mean_new, len( w_mean_new)
############################################
################# PLOTTING #################
############################################
plt.title('')
plt.xlabel('x, [m]')
plt.ylabel('y, [m]')
labels = {
'real': 'Mean Trajectory from Demonstrations',
'reconstructed': 'Reconstructed trajectory, using mean of weights',
'reconstructedNOTFULL': 'Reconstructed Not Full trajectory',
'reconstructedFULL': 'Reconstructed Predicted Full trajectory'
}
plt.plot()
plt.plot(test_traj_right[:, 0], test_traj_right[:, 1], 'blue', label=labels['real'])
plt.plot(reconstr_traj_mean_right_x, reconstr_traj_mean_right_y, 'red', label=labels['reconstructed'])
plt.plot(reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new, '--go', label=labels['reconstructedNOTFULL'])
plt.plot(reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED, 'black', label=labels['reconstructedFULL'])
plt.legend(loc='lower right')
plt.show() | """
Computes phase from given timestamps. Phase is normalized time from 0 to 1.
"""
phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
return phases | identifier_body |
RBF_full_NEW.py | import numpy as np
import matplotlib.pyplot as plt
from functions import comb_dataset, read_csv_fast
import os
import pandas as pd
def interpolate_timestamps(NUM_POINTS):
csv_data = read_csv_fast(os.path.dirname(os.path.realpath(__file__))+'/trajectories/right_100.csv')
timestamps = csv_data[:,0]
duration = timestamps[-1] - timestamps[0]
interpolated_duration_list = [0]
for i in range(NUM_POINTS-2):
|
interpolated_duration_list.append(duration)
series = pd.Series(interpolated_duration_list)
result = series.interpolate()
return np.array(result)
def normalize_time(full_timestamps, half_timestamp):
"""
Computes phase from given timestamps. Phase is normalized time from 0 to 1.
"""
phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
return phases
def learn_weights(norm_data, PSIs_matrix, LAMBDA_COEFF=1e-12):
"""
:param norm_data: predefined trajectories -> data to learn weights
:param PSIs_matrix: matrix of basis kernel functions (taken from compute_feature_matrix)
:return: learned weights
"""
# Find out the number of basis functions
N = PSIs_matrix.shape[1]
# Find out the dimensionality of trajectories (x and y)
dof = norm_data[0].shape[1]
# There is a matrix of zeros (#Dofs x #basis functions) of weights for each of the demonstrations.
weights = np.zeros((norm_data.shape[0], dof, N))
# fill weights matrix
for index in range(norm_data.shape[0]):
for i in range(dof):
# In case some regularization is necessary
# weights[index][i] = np.dot(np.linalg.inv(np.dot(PSIs_matrix, PSIs_matrix.T) + 10e-12 * np.eye(np.dot(PSIs_matrix, PSIs_matrix.T).shape[0])), np.dot(PSIs_matrix, norm_data[index][:,i]))
# weights[index][i] = np.dot(np.linalg.pinv(
# np.dot(PSIs_matrix, PSIs_matrix.T) + LAMBDA_COEFF * np.identity(PSIs_matrix.shape[0])),
# np.dot(PSIs_matrix, norm_data[index][:, i]))
A = np.dot(PSIs_matrix.T, PSIs_matrix) + LAMBDA_COEFF * np.identity(N)
B = np.dot(PSIs_matrix.T, norm_data[index][:, i])
weights[index,i,:] = np.linalg.solve(A, B)
return weights
def compute_feature_matrix(phases, N, h):
"""
Compute a TxN matrix of features of the given phase vector using Gaussian basis functions.
Where T is the number of elements in the phase vector and N is the number of basis functions.
Parameters
----------
numpy.ndarray
phases: vector with phases
int
N: number of basis functions in the resulting matrix
float
h: width of a basis function (variance)
Returns
-------
numpy.ndarray
TxN matrix of Gaussian basis functions
"""
T = len(phases)
# Uniformly distribute the centers of N basis functions in domain[-2h,2h+1].
centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
# compute a TxN matrix with centers
C = np.repeat(centers.reshape(1, N), T, axis=0)
# compute a TxN matrix with phases
P = np.repeat(phases.reshape(T, 1), N, axis=1)
# compute a TxN feature matrix
Phi = np.exp(- 0.5 / h * np.square(P - C))
# normalize the feature matrix
Phi = Phi / np.sum(Phi, axis=1).reshape(T, 1)
return Phi
####################################################
################ Call For Functions ################
####################################################
# parameters
N = 8 # Number of basis functions
h = 0.1 #1.0 / (N * N)
ridge_factor = 1e-12
time_steps = 100
dataset = comb_dataset(time_steps)[:3] # get dataset
num_right_traj = len(dataset[0]) # number of right trajectories in dataset
M = 2 # dimensionality
###### Prepare Matrix for Basis Functions ######
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
percentageFULL = 100
part_timestamps = (len(interpolated_timestamps) * (percentageFULL) / 100)
interpolated_timestamps_full = interpolated_timestamps[0:part_timestamps]
phase_full = normalize_time(interpolated_timestamps, interpolated_timestamps_full)
# z = np.linspace(0, 1, 100) # starting at 0, ending at 1
# a= z[:,None]
phases_full = phase_full[:, None]
psi = compute_feature_matrix(phases_full, N, h) # shape (100, 8)
###### Calculate WEIGHTS ######
weights_right = learn_weights(dataset[0], psi)
###### Calculate MEAN of weights ######
weight_mean_right_x, weight_mean_right_y = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
weights_mean_right_x, weights_mean_right_y = weight_mean_right_x[:,None], weight_mean_right_y[:,None] # shape (8, 1)
combined_weights_mean_xy = [weights_mean_right_x[:, 0], weights_mean_right_y[:, 0]]
###### Reconstructed Trajectory ######
x_weights_right, y_weights_right = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
reconstr_traj_mean_right_x, reconstr_traj_mean_right_y = np.dot(psi, x_weights_right[:,None]).reshape([time_steps]), np.dot(psi, y_weights_right[:,None]).reshape([time_steps])
###### Calculate COVARIANCE of weights ######
weights_cov_right_x = np.cov(weights_right[:,0].T) # shape (8, 8)
weights_cov_right_y = np.cov(weights_right[:,1].T)
combined_cov_right_xy = [weights_cov_right_x, weights_cov_right_y]
###### bound calculation for mean ######
traj_cov_x_diag = np.sum(psi.dot(weights_cov_right_x) * psi, axis=1)
std_x = np.sqrt(traj_cov_x_diag)
bound_upp_x = reconstr_traj_mean_right_x + 2 * std_x
bound_bottom_x = reconstr_traj_mean_right_x - 2 * std_x
traj_cov_y_diag = np.sum(psi.dot(weights_cov_right_y) * psi, axis=1)
std_y = np.sqrt(traj_cov_y_diag)
bound_upp_y = reconstr_traj_mean_right_y + 2 * std_y
bound_bottom_y = reconstr_traj_mean_right_y - 2 * std_y
fig, ax = plt.subplots()
plt.figure(1)
plt.plot(reconstr_traj_mean_right_x, 'black')
plt.fill_between(np.arange(time_steps), bound_upp_x, bound_bottom_x, alpha = 0.5, color = 'red', linewidth = 1)
plt.show()
###########################################################
###### Calculation of NEW MEAN & COVARIANCE | 3rd step ######
###########################################################
test_traj_right = np.mean(dataset[0], axis=0) # mean trajectory from RIGHT DB
percentage = 10
part = (len(test_traj_right) * (percentage) / 100)
test_traj_right_NOTfull = test_traj_right[0:part]
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
part_timestamps = (len(interpolated_timestamps) * (percentage) / 100)
interpolated_timestamps_NOTfull = interpolated_timestamps[0:part_timestamps]
# print interpolated_timestamps_NOTfull
phase_NOTfull = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# print phase_NOTfull
# exit(1)
# phases = []
# for t in range(len(interpolated_timestamps_NOTfull)):
# phase = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# phases.append(phase)
#
# print phases
# # print phase
# # exit(1)
# feature matrix for current trajectory
N1 = 8
h1 = 0.1
obs_variance = 0.0005 #????
z1 = np.linspace(0, 1, len(test_traj_right_NOTfull)) # starting at 0, ending at 1
psi_new = compute_feature_matrix(phase_NOTfull, N1, h1) # shape (10, 8)
# print "shape: ", psi_new.shape
psi_mean = []
for a in range(len(psi_new)):
psi_mean.append(np.mean(psi_new[a]))
# compute w_mean and w_cov separately for each dimension
num_dimensions = 2
w_mean_new = [np.empty([N1]) for i in range(num_dimensions)]
w_cov_new = [np.empty([N1, N1]) for i in range(num_dimensions)]
for i in range(num_dimensions): # for BOTH DIMENSIONS
C = np.dot(combined_cov_right_xy[i], psi_new.T) # shape (8,10)
D = np.diag(np.sum(C * psi_new.T, axis=0) + obs_variance) # shape (10, 10)
b = test_traj_right_NOTfull[:, i] - np.dot(psi_new, combined_weights_mean_xy[i]) # shape 10
x = np.linalg.solve(D, b) # shape (10,)
Lb = np.dot(C, x) # shape 8
w_mean_new[i] = combined_weights_mean_xy[i] + Lb
y = np.linalg.solve(D, C.T) # shape (10, 8)
La = np.dot(C, y) # shape (8, 8)
w_cov_new[i] = np.array(combined_cov_right_xy[i]) - La
# print "COV1: ", combined_cov_right_xy[i].shape
# print "C: ", C.shape
# print "D : ", D.shape
# print "meanALL: ", combined_weights_mean_xy[i].shape
# print "b: ", b.shape
# print "x: ", x.shape
# print "y: ", y.shape
# print "Lb: ", Lb.shape
# print "La: ", La.shape
###### Reconstructed NOT FULL Trajectory ######
reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new = np.dot(psi_new, w_mean_new[0]), np.dot(psi_new, w_mean_new[1])
###### Reconstructed PREDICTED FULL Trajectory ######
reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED = np.dot(psi, w_mean_new[0]), np.dot(psi, w_mean_new[1])
# print len(reconstr_traj_mean_right_x), len(reconstr_traj_mean_right_y)
# print len(reconstr_traj_mean_right_x_new), len(reconstr_traj_mean_right_y_new)
# print "full:", test_traj_right
# print "NOT_full:", test_traj_right_NOTfull
# print w_mean_new, len( w_mean_new)
############################################
################# PLOTTING #################
############################################
plt.title('')
plt.xlabel('x, [m]')
plt.ylabel('y, [m]')
labels = {
'real': 'Mean Trajectory from Demonstrations',
'reconstructed': 'Reconstructed trajectory, using mean of weights',
'reconstructedNOTFULL': 'Reconstructed Not Full trajectory',
'reconstructedFULL': 'Reconstructed Predicted Full trajectory'
}
plt.plot()
plt.plot(test_traj_right[:, 0], test_traj_right[:, 1], 'blue', label=labels['real'])
plt.plot(reconstr_traj_mean_right_x, reconstr_traj_mean_right_y, 'red', label=labels['reconstructed'])
plt.plot(reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new, '--go', label=labels['reconstructedNOTFULL'])
plt.plot(reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED, 'black', label=labels['reconstructedFULL'])
plt.legend(loc='lower right')
plt.show() | interpolated_duration_list.append(np.nan) | conditional_block |
RBF_full_NEW.py | import numpy as np
import matplotlib.pyplot as plt
from functions import comb_dataset, read_csv_fast
import os
import pandas as pd
def interpolate_timestamps(NUM_POINTS):
csv_data = read_csv_fast(os.path.dirname(os.path.realpath(__file__))+'/trajectories/right_100.csv')
timestamps = csv_data[:,0]
duration = timestamps[-1] - timestamps[0]
interpolated_duration_list = [0]
for i in range(NUM_POINTS-2):
interpolated_duration_list.append(np.nan)
interpolated_duration_list.append(duration)
series = pd.Series(interpolated_duration_list)
result = series.interpolate()
return np.array(result)
def normalize_time(full_timestamps, half_timestamp):
"""
Computes phase from given timestamps. Phase is normalized time from 0 to 1.
"""
phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
return phases
def learn_weights(norm_data, PSIs_matrix, LAMBDA_COEFF=1e-12):
"""
:param norm_data: predefined trajectories -> data to learn weights
:param PSIs_matrix: matrix of basis kernel functions (taken from compute_feature_matrix)
:return: learned weights
"""
# Find out the number of basis functions
N = PSIs_matrix.shape[1]
# Find out the dimensionality of trajectories (x and y)
dof = norm_data[0].shape[1]
# There is a matrix of zeros (#Dofs x #basis functions) of weights for each of the demonstrations.
weights = np.zeros((norm_data.shape[0], dof, N))
# fill weights matrix
for index in range(norm_data.shape[0]):
for i in range(dof):
# In case some regularization is necessary
# weights[index][i] = np.dot(np.linalg.inv(np.dot(PSIs_matrix, PSIs_matrix.T) + 10e-12 * np.eye(np.dot(PSIs_matrix, PSIs_matrix.T).shape[0])), np.dot(PSIs_matrix, norm_data[index][:,i]))
# weights[index][i] = np.dot(np.linalg.pinv(
# np.dot(PSIs_matrix, PSIs_matrix.T) + LAMBDA_COEFF * np.identity(PSIs_matrix.shape[0])),
# np.dot(PSIs_matrix, norm_data[index][:, i]))
A = np.dot(PSIs_matrix.T, PSIs_matrix) + LAMBDA_COEFF * np.identity(N)
B = np.dot(PSIs_matrix.T, norm_data[index][:, i])
weights[index,i,:] = np.linalg.solve(A, B)
return weights
def compute_feature_matrix(phases, N, h):
"""
Compute a TxN matrix of features of the given phase vector using Gaussian basis functions.
Where T is the number of elements in the phase vector and N is the number of basis functions.
Parameters
----------
numpy.ndarray
phases: vector with phases
int
N: number of basis functions in the resulting matrix
float
h: width of a basis function (variance)
Returns
-------
numpy.ndarray
TxN matrix of Gaussian basis functions
"""
T = len(phases)
# Uniformly distribute the centers of N basis functions in domain[-2h,2h+1].
centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
# compute a TxN matrix with centers
C = np.repeat(centers.reshape(1, N), T, axis=0)
# compute a TxN matrix with phases
P = np.repeat(phases.reshape(T, 1), N, axis=1)
# compute a TxN feature matrix
Phi = np.exp(- 0.5 / h * np.square(P - C))
# normalize the feature matrix
Phi = Phi / np.sum(Phi, axis=1).reshape(T, 1)
return Phi
####################################################
################ Call For Functions ################
####################################################
# parameters
N = 8 # Number of basis functions
h = 0.1 #1.0 / (N * N)
ridge_factor = 1e-12
time_steps = 100
dataset = comb_dataset(time_steps)[:3] # get dataset
num_right_traj = len(dataset[0]) # number of right trajectories in dataset
M = 2 # dimensionality
###### Prepare Matrix for Basis Functions ######
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
percentageFULL = 100
part_timestamps = (len(interpolated_timestamps) * (percentageFULL) / 100)
interpolated_timestamps_full = interpolated_timestamps[0:part_timestamps]
phase_full = normalize_time(interpolated_timestamps, interpolated_timestamps_full)
# z = np.linspace(0, 1, 100) # starting at 0, ending at 1
# a= z[:,None]
phases_full = phase_full[:, None]
psi = compute_feature_matrix(phases_full, N, h) # shape (100, 8)
###### Calculate WEIGHTS ######
weights_right = learn_weights(dataset[0], psi)
###### Calculate MEAN of weights ######
weight_mean_right_x, weight_mean_right_y = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0)
weights_mean_right_x, weights_mean_right_y = weight_mean_right_x[:,None], weight_mean_right_y[:,None] # shape (8, 1)
combined_weights_mean_xy = [weights_mean_right_x[:, 0], weights_mean_right_y[:, 0]]
###### Reconstructed Trajectory ######
x_weights_right, y_weights_right = np.mean(weights_right[:,0], axis=0), np.mean(weights_right[:,1], axis=0) | combined_cov_right_xy = [weights_cov_right_x, weights_cov_right_y]
###### bound calculation for mean ######
traj_cov_x_diag = np.sum(psi.dot(weights_cov_right_x) * psi, axis=1)
std_x = np.sqrt(traj_cov_x_diag)
bound_upp_x = reconstr_traj_mean_right_x + 2 * std_x
bound_bottom_x = reconstr_traj_mean_right_x - 2 * std_x
traj_cov_y_diag = np.sum(psi.dot(weights_cov_right_y) * psi, axis=1)
std_y = np.sqrt(traj_cov_y_diag)
bound_upp_y = reconstr_traj_mean_right_y + 2 * std_y
bound_bottom_y = reconstr_traj_mean_right_y - 2 * std_y
fig, ax = plt.subplots()
plt.figure(1)
plt.plot(reconstr_traj_mean_right_x, 'black')
plt.fill_between(np.arange(time_steps), bound_upp_x, bound_bottom_x, alpha = 0.5, color = 'red', linewidth = 1)
plt.show()
###########################################################
###### Calculation of NEW MEAN & COVARIANCE | 3rd step ######
###########################################################
test_traj_right = np.mean(dataset[0], axis=0) # mean trajectory from RIGHT DB
percentage = 10
part = (len(test_traj_right) * (percentage) / 100)
test_traj_right_NOTfull = test_traj_right[0:part]
###### PHASE calculation ###############
interpolated_timestamps = interpolate_timestamps(time_steps)
part_timestamps = (len(interpolated_timestamps) * (percentage) / 100)
interpolated_timestamps_NOTfull = interpolated_timestamps[0:part_timestamps]
# print interpolated_timestamps_NOTfull
phase_NOTfull = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# print phase_NOTfull
# exit(1)
# phases = []
# for t in range(len(interpolated_timestamps_NOTfull)):
# phase = normalize_time(interpolated_timestamps, interpolated_timestamps_NOTfull)
# phases.append(phase)
#
# print phases
# # print phase
# # exit(1)
# feature matrix for current trajectory
N1 = 8
h1 = 0.1
obs_variance = 0.0005 #????
z1 = np.linspace(0, 1, len(test_traj_right_NOTfull)) # starting at 0, ending at 1
psi_new = compute_feature_matrix(phase_NOTfull, N1, h1) # shape (10, 8)
# print "shape: ", psi_new.shape
psi_mean = []
for a in range(len(psi_new)):
psi_mean.append(np.mean(psi_new[a]))
# compute w_mean and w_cov separately for each dimension
num_dimensions = 2
w_mean_new = [np.empty([N1]) for i in range(num_dimensions)]
w_cov_new = [np.empty([N1, N1]) for i in range(num_dimensions)]
for i in range(num_dimensions): # for BOTH DIMENSIONS
C = np.dot(combined_cov_right_xy[i], psi_new.T) # shape (8,10)
D = np.diag(np.sum(C * psi_new.T, axis=0) + obs_variance) # shape (10, 10)
b = test_traj_right_NOTfull[:, i] - np.dot(psi_new, combined_weights_mean_xy[i]) # shape 10
x = np.linalg.solve(D, b) # shape (10,)
Lb = np.dot(C, x) # shape 8
w_mean_new[i] = combined_weights_mean_xy[i] + Lb
y = np.linalg.solve(D, C.T) # shape (10, 8)
La = np.dot(C, y) # shape (8, 8)
w_cov_new[i] = np.array(combined_cov_right_xy[i]) - La
# print "COV1: ", combined_cov_right_xy[i].shape
# print "C: ", C.shape
# print "D : ", D.shape
# print "meanALL: ", combined_weights_mean_xy[i].shape
# print "b: ", b.shape
# print "x: ", x.shape
# print "y: ", y.shape
# print "Lb: ", Lb.shape
# print "La: ", La.shape
###### Reconstructed NOT FULL Trajectory ######
reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new = np.dot(psi_new, w_mean_new[0]), np.dot(psi_new, w_mean_new[1])
###### Reconstructed PREDICTED FULL Trajectory ######
reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED = np.dot(psi, w_mean_new[0]), np.dot(psi, w_mean_new[1])
# print len(reconstr_traj_mean_right_x), len(reconstr_traj_mean_right_y)
# print len(reconstr_traj_mean_right_x_new), len(reconstr_traj_mean_right_y_new)
# print "full:", test_traj_right
# print "NOT_full:", test_traj_right_NOTfull
# print w_mean_new, len( w_mean_new)
############################################
################# PLOTTING #################
############################################
plt.title('')
plt.xlabel('x, [m]')
plt.ylabel('y, [m]')
labels = {
'real': 'Mean Trajectory from Demonstrations',
'reconstructed': 'Reconstructed trajectory, using mean of weights',
'reconstructedNOTFULL': 'Reconstructed Not Full trajectory',
'reconstructedFULL': 'Reconstructed Predicted Full trajectory'
}
plt.plot()
plt.plot(test_traj_right[:, 0], test_traj_right[:, 1], 'blue', label=labels['real'])
plt.plot(reconstr_traj_mean_right_x, reconstr_traj_mean_right_y, 'red', label=labels['reconstructed'])
plt.plot(reconstr_traj_mean_right_x_new, reconstr_traj_mean_right_y_new, '--go', label=labels['reconstructedNOTFULL'])
plt.plot(reconstr_traj_mean_right_x_PREDICTED, reconstr_traj_mean_right_y_PREDICTED, 'black', label=labels['reconstructedFULL'])
plt.legend(loc='lower right')
plt.show() | reconstr_traj_mean_right_x, reconstr_traj_mean_right_y = np.dot(psi, x_weights_right[:,None]).reshape([time_steps]), np.dot(psi, y_weights_right[:,None]).reshape([time_steps])
###### Calculate COVARIANCE of weights ######
weights_cov_right_x = np.cov(weights_right[:,0].T) # shape (8, 8)
weights_cov_right_y = np.cov(weights_right[:,1].T) | random_line_split |
validation.go | // Package validation provides UpgradeConfig CR validation tools.
package validation
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/blang/semver"
"github.com/go-logr/logr"
"github.com/google/uuid"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/cluster-version-operator/pkg/cincinnati"
imagereference "github.com/openshift/library-go/pkg/image/reference"
upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
cv "github.com/openshift/managed-upgrade-operator/pkg/clusterversion"
)
const (
defaultUpstreamServer = "https://api.openshift.com/api/upgrades_info/v1/graph"
)
// NewBuilder returns a validationBuilder object that implements the ValidationBuilder interface.
func NewBuilder() ValidationBuilder {
return &validationBuilder{}
}
// Validator knows how to validate UpgradeConfig CRs.
//go:generate mockgen -destination=mocks/mockValidation.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation Validator
type Validator interface {
IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error)
}
type validator struct{}
// ValidatorResult returns a type that enables validation of upgradeconfigs
type ValidatorResult struct {
// Indicates that the UpgradeConfig is semantically and syntactically valid
IsValid bool
// Indicates that the UpgradeConfig should be actioned to conduct an upgrade
IsAvailableUpdate bool
// A message associated with the validation result
Message string
}
// VersionComparison is an in used to compare versions
type VersionComparison int
const (
// VersionUnknown is of type VersionComparision and is used to idicate an unknown version
VersionUnknown VersionComparison = iota - 2
// VersionDowngrade is of type VersionComparision and is used to idicate an version downgrade
VersionDowngrade
// VersionEqual is of type VersionComparision and is used to idicate version are equal
VersionEqual
// VersionUpgrade is of type VersionComparision and is used to idicate version is able to upgrade
VersionUpgrade
)
// IsValidUpgradeConfig checks the validity of UpgradeConfig CRs
func (v *validator) IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error) {
// Validate upgradeAt as RFC3339
upgradeAt := uC.Spec.UpgradeAt
_, err := time.Parse(time.RFC3339, upgradeAt)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse upgradeAt:%s during validation", upgradeAt),
}, nil
}
// Initial validation considering the usage for three optional fields for image, version and channel.
// If the UpgradeConfig doesn't support image or version based upgrade then fail validation.
// TODO: Remove (image and version) message once OSD-7609 is done.
if !supportsImageUpgrade(uC) && !supportsVersionUpgrade(uC) {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to validate .spec.desired in UpgradeConfig: Either (image and version) or (version and channel) should be specified",
}, nil
}
// Validate image spec reference
// Sample image spec: "quay.io/openshift-release-dev/ocp-release@sha256:8c3f5392ac933cd520b4dce560e007f2472d2d943de14c29cbbb40c72ae44e4c"
// Image spec structure: Registry/Namespace/Name@ID
image := uC.Spec.Desired.Image
if supportsImageUpgrade(uC) {
ref, err := imagereference.Parse(image)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image %s: must be a valid image pull spec:%v", image, err),
}, nil
}
if len(ref.Registry) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no registry specified", image),
}, nil
}
if len(ref.Namespace) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no repository specified", image),
}, nil
}
if len(ref.ID) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no image digest specified", image),
}, nil
}
}
// Validate desired version.
dv := uC.Spec.Desired.Version
if !empty(dv) {
version, err := cv.GetCurrentVersion(cV)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to get current cluster version during validation",
}, err
}
// Check for valid SemVer and convert to SemVer.
desiredVersion, err := semver.Parse(dv)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse desired version %s as semver", dv),
}, nil
}
currentVersion, err := semver.Parse(version)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse current version %s as semver", version),
}, nil
}
// Compare versions to ascertain if upgrade should proceed.
versionComparison, err := compareVersions(desiredVersion, currentVersion, logger)
if err != nil {
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: err.Error(),
}, nil
}
switch versionComparison {
case VersionUnknown:
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s and current version %s could not be compared.", desiredVersion, currentVersion),
}, nil
case VersionDowngrade:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Downgrades to desired version %s from %s are unsupported", desiredVersion, currentVersion),
}, nil
case VersionEqual:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s matches the current version %s", desiredVersion, currentVersion),
}, nil
case VersionUpgrade:
logger.Info(fmt.Sprintf("Desired version %s validated as greater than current version %s", desiredVersion, currentVersion))
}
}
desiredChannel := uC.Spec.Desired.Channel
if supportsVersionUpgrade(uC) {
// Validate available version is in Cincinnati.
clusterId, err := uuid.Parse(string(cV.Spec.ClusterID))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
upstreamURI, err := url.Parse(getUpstreamURL(cV))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
version, _ := cv.GetCurrentVersion(cV)
desiredVersion, _ := semver.Parse(dv)
currentVersion, _ := semver.Parse(version)
updates, err := cincinnati.NewClient(clusterId).GetUpdates(upstreamURI.String(), desiredChannel, currentVersion)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, err
}
var cvoUpdates []configv1.Update
for _, update := range updates {
cvoUpdates = append(cvoUpdates, configv1.Update{
Version: update.Version.String(),
Image: update.Image,
})
}
// Check whether the desired version exists in availableUpdates
found := false
for _, v := range cvoUpdates |
if !found {
logger.Info(fmt.Sprintf("Failed to find the desired version %s in channel %s", desiredVersion, desiredChannel))
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("cannot find version %s in available updates", desiredVersion),
}, nil
}
} else {
logger.Info("Skipping version validation from channel as image is used")
}
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: true,
Message: "UpgradeConfig is valid",
}, nil
}
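// Editor's note (added for illustration; field values are invented examples): a desired
// spec that passes the checks above would look roughly like
//
//   spec:
//     upgradeAt: "2021-01-01T00:00:00Z"   # must parse as RFC3339
//     desired:
//       version: "4.8.2"                  # valid semver, newer than the current cluster version
//       channel: "stable-4.8"             # channel-based upgrade, so no image is set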
// compareVersions accepts the desired and current versions as semver values and compares
// them. Returns an indication of whether the desired version constitutes a downgrade,
// no-op or upgrade, or an error if no valid comparison can occur
func compareVersions(dV semver.Version, cV semver.Version, logger logr.Logger) (VersionComparison, error) {
result := dV.Compare(cV)
switch result {
case -1:
logger.Info(fmt.Sprintf("%s is less than %s", dV, cV))
return VersionDowngrade, nil
case 0:
logger.Info(fmt.Sprintf("%s is equal to %s", dV, cV))
return VersionEqual, nil
case 1:
logger.Info(fmt.Sprintf("%s is greater than %s", dV, cV))
return VersionUpgrade, nil
default:
return VersionUnknown, fmt.Errorf("semver comparison failed for unknown reason. Versions %s & %s", dV, cV)
}
}
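// Editor's note (added): compareVersions follows plain semver ordering; for example
//
//   d, _ := semver.Parse("4.8.2")
//   c, _ := semver.Parse("4.7.13")
//   cmp, _ := compareVersions(d, c, logger)  // cmp == VersionUpgrade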
// getUpstreamURL retrieves the upstream URL from the ClusterVersion spec, defaulting to the default if not available
func getUpstreamURL(cV *configv1.ClusterVersion) string {
upstream := string(cV.Spec.Upstream)
if len(upstream) == 0 {
upstream = defaultUpstreamServer
}
return upstream
}
// ValidationBuilder is an interface that enables ValidationBuilder implementations
//go:generate mockgen -destination=mocks/mockValidationBuilder.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation ValidationBuilder
type ValidationBuilder interface {
NewClient() (Validator, error)
}
// validationBuilder is an empty struct that enables instantiation of this type and its
// implemented interface.
type validationBuilder struct{}
// NewClient returns a Validator interface or an error if one occurs.
func (vb *validationBuilder) NewClient() (Validator, error) {
return &validator{}, nil
}
// supportsImageUpgrade function checks if the upgrade should proceed with image digest reference.
// TODO: In future, image should not be tied with version for validation. Refer Jira OSD-7609.
func supportsImageUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return !empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && empty(uc.Spec.Desired.Channel)
}
// supportsVersionUpgrade function checks if the upgrade should proceed with version from a channel.
func supportsVersionUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && !empty(uc.Spec.Desired.Channel)
}
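// Editor's note (added): taken together, supportsImageUpgrade and supportsVersionUpgrade
// encode the two accepted shapes of .spec.desired:
//   image + version set, channel empty  -> image (digest) based upgrade
//   version + channel set, image empty  -> Cincinnati channel based upgrade
// Any other combination is rejected by IsValidUpgradeConfig.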
// empty function checks if a given string is empty or not.
func empty(s string) bool {
return strings.TrimSpace(s) == ""
}
| {
if v.Version == dv && !v.Force {
found = true
}
} | conditional_block |
validation.go | // Package validation provides UpgradeConfig CR validation tools.
package validation
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/blang/semver"
"github.com/go-logr/logr"
"github.com/google/uuid"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/cluster-version-operator/pkg/cincinnati"
imagereference "github.com/openshift/library-go/pkg/image/reference"
upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
cv "github.com/openshift/managed-upgrade-operator/pkg/clusterversion"
)
const (
defaultUpstreamServer = "https://api.openshift.com/api/upgrades_info/v1/graph"
)
// NewBuilder returns a validationBuilder object that implements the ValidationBuilder interface.
func NewBuilder() ValidationBuilder {
return &validationBuilder{}
}
// Validator knows how to validate UpgradeConfig CRs.
//go:generate mockgen -destination=mocks/mockValidation.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation Validator
type Validator interface {
IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error)
}
type validator struct{}
// ValidatorResult returns a type that enables validation of upgradeconfigs
type ValidatorResult struct {
// Indicates that the UpgradeConfig is semantically and syntactically valid
IsValid bool
// Indicates that the UpgradeConfig should be actioned to conduct an upgrade
IsAvailableUpdate bool
// A message associated with the validation result
Message string
}
// VersionComparison is an int used to compare versions
type VersionComparison int
const (
// VersionUnknown is of type VersionComparison and is used to indicate an unknown version
VersionUnknown VersionComparison = iota - 2
// VersionDowngrade is of type VersionComparison and is used to indicate a version downgrade
VersionDowngrade
// VersionEqual is of type VersionComparison and is used to indicate versions are equal
VersionEqual
// VersionUpgrade is of type VersionComparison and is used to indicate a version is able to upgrade
VersionUpgrade
)
// IsValidUpgradeConfig checks the validity of UpgradeConfig CRs
func (v *validator) IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error) |
// compareVersions accepts the desired and current versions as semver values and compares
// them. Returns an indication of whether the desired version constitutes a downgrade,
// no-op or upgrade, or an error if no valid comparison can occur
func compareVersions(dV semver.Version, cV semver.Version, logger logr.Logger) (VersionComparison, error) {
result := dV.Compare(cV)
switch result {
case -1:
logger.Info(fmt.Sprintf("%s is less than %s", dV, cV))
return VersionDowngrade, nil
case 0:
logger.Info(fmt.Sprintf("%s is equal to %s", dV, cV))
return VersionEqual, nil
case 1:
logger.Info(fmt.Sprintf("%s is greater than %s", dV, cV))
return VersionUpgrade, nil
default:
return VersionUnknown, fmt.Errorf("semver comparison failed for unknown reason. Versions %s & %s", dV, cV)
}
}
// getUpstreamURL retrieves the upstream URL from the ClusterVersion spec, defaulting to the default if not available
func getUpstreamURL(cV *configv1.ClusterVersion) string {
upstream := string(cV.Spec.Upstream)
if len(upstream) == 0 {
upstream = defaultUpstreamServer
}
return upstream
}
// ValidationBuilder is an interface that enables ValidationBuilder implementations
//go:generate mockgen -destination=mocks/mockValidationBuilder.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation ValidationBuilder
type ValidationBuilder interface {
NewClient() (Validator, error)
}
// validationBuilder is an empty struct that enables instantiation of this type and its
// implemented interface.
type validationBuilder struct{}
// NewClient returns a Validator interface or an error if one occurs.
func (vb *validationBuilder) NewClient() (Validator, error) {
return &validator{}, nil
}
// supportsImageUpgrade function checks if the upgrade should proceed with image digest reference.
// TODO: In future, image should not be tied with version for validation. Refer Jira OSD-7609.
func supportsImageUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return !empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && empty(uc.Spec.Desired.Channel)
}
// supportsVersionUpgrade function checks if the upgrade should proceed with version from a channel.
func supportsVersionUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && !empty(uc.Spec.Desired.Channel)
}
// empty function checks if a given string is empty or not.
func empty(s string) bool {
return strings.TrimSpace(s) == ""
}
| {
// Validate upgradeAt as RFC3339
upgradeAt := uC.Spec.UpgradeAt
_, err := time.Parse(time.RFC3339, upgradeAt)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse upgradeAt:%s during validation", upgradeAt),
}, nil
}
// Initial validation considering the usage for three optional fields for image, version and channel.
// If the UpgradeConfig doesn't support image or version based upgrade then fail validation.
// TODO: Remove (image and version) message once OSD-7609 is done.
if !supportsImageUpgrade(uC) && !supportsVersionUpgrade(uC) {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to validate .spec.desired in UpgradeConfig: Either (image and version) or (version and channel) should be specified",
}, nil
}
// Validate image spec reference
// Sample image spec: "quay.io/openshift-release-dev/ocp-release@sha256:8c3f5392ac933cd520b4dce560e007f2472d2d943de14c29cbbb40c72ae44e4c"
// Image spec structure: Registry/Namespace/Name@ID
image := uC.Spec.Desired.Image
if supportsImageUpgrade(uC) {
ref, err := imagereference.Parse(image)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image %s: must be a valid image pull spec:%v", image, err),
}, nil
}
if len(ref.Registry) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no registry specified", image),
}, nil
}
if len(ref.Namespace) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no repository specified", image),
}, nil
}
if len(ref.ID) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no image digest specified", image),
}, nil
}
}
// Validate desired version.
dv := uC.Spec.Desired.Version
if !empty(dv) {
version, err := cv.GetCurrentVersion(cV)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to get current cluster version during validation",
}, err
}
// Check for valid SemVer and convert to SemVer.
desiredVersion, err := semver.Parse(dv)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse desired version %s as semver", dv),
}, nil
}
currentVersion, err := semver.Parse(version)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse current version %s as semver", version),
}, nil
}
// Compare versions to ascertain if upgrade should proceed.
versionComparison, err := compareVersions(desiredVersion, currentVersion, logger)
if err != nil {
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: err.Error(),
}, nil
}
switch versionComparison {
case VersionUnknown:
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s and current version %s could not be compared.", desiredVersion, currentVersion),
}, nil
case VersionDowngrade:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Downgrades to desired version %s from %s are unsupported", desiredVersion, currentVersion),
}, nil
case VersionEqual:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s matches the current version %s", desiredVersion, currentVersion),
}, nil
case VersionUpgrade:
logger.Info(fmt.Sprintf("Desired version %s validated as greater than current version %s", desiredVersion, currentVersion))
}
}
desiredChannel := uC.Spec.Desired.Channel
if supportsVersionUpgrade(uC) {
// Validate available version is in Cincinnati.
clusterId, err := uuid.Parse(string(cV.Spec.ClusterID))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
upstreamURI, err := url.Parse(getUpstreamURL(cV))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
version, _ := cv.GetCurrentVersion(cV)
desiredVersion, _ := semver.Parse(dv)
currentVersion, _ := semver.Parse(version)
updates, err := cincinnati.NewClient(clusterId).GetUpdates(upstreamURI.String(), desiredChannel, currentVersion)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, err
}
var cvoUpdates []configv1.Update
for _, update := range updates {
cvoUpdates = append(cvoUpdates, configv1.Update{
Version: update.Version.String(),
Image: update.Image,
})
}
// Check whether the desired version exists in availableUpdates
found := false
for _, v := range cvoUpdates {
if v.Version == dv && !v.Force {
found = true
}
}
if !found {
logger.Info(fmt.Sprintf("Failed to find the desired version %s in channel %s", desiredVersion, desiredChannel))
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("cannot find version %s in available updates", desiredVersion),
}, nil
}
} else {
logger.Info("Skipping version validation from channel as image is used")
}
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: true,
Message: "UpgradeConfig is valid",
}, nil
} | identifier_body |
validation.go | // Package validation provides UpgradeConfig CR validation tools.
package validation
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/blang/semver"
"github.com/go-logr/logr"
"github.com/google/uuid"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/cluster-version-operator/pkg/cincinnati"
imagereference "github.com/openshift/library-go/pkg/image/reference"
upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
cv "github.com/openshift/managed-upgrade-operator/pkg/clusterversion"
)
const (
defaultUpstreamServer = "https://api.openshift.com/api/upgrades_info/v1/graph"
)
// NewBuilder returns a validationBuilder object that implements the ValidationBuilder interface.
func NewBuilder() ValidationBuilder {
return &validationBuilder{}
}
// Validator knows how to validate UpgradeConfig CRs.
//go:generate mockgen -destination=mocks/mockValidation.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation Validator
type Validator interface {
IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error)
}
type validator struct{}
// ValidatorResult returns a type that enables validation of upgradeconfigs
type ValidatorResult struct {
// Indicates that the UpgradeConfig is semantically and syntactically valid
IsValid bool
// Indicates that the UpgradeConfig should be actioned to conduct an upgrade
IsAvailableUpdate bool
// A message associated with the validation result
Message string
}
// VersionComparison is an int used to compare versions
type VersionComparison int
const (
// VersionUnknown is of type VersionComparison and is used to indicate an unknown version
VersionUnknown VersionComparison = iota - 2
// VersionDowngrade is of type VersionComparison and is used to indicate a version downgrade
VersionDowngrade
// VersionEqual is of type VersionComparison and is used to indicate versions are equal
VersionEqual
// VersionUpgrade is of type VersionComparison and is used to indicate a version is able to upgrade
VersionUpgrade
)
// IsValidUpgradeConfig checks the validity of UpgradeConfig CRs
func (v *validator) IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error) {
// Validate upgradeAt as RFC3339
upgradeAt := uC.Spec.UpgradeAt
_, err := time.Parse(time.RFC3339, upgradeAt)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse upgradeAt:%s during validation", upgradeAt),
}, nil
}
// Initial validation considering the usage for three optional fields for image, version and channel.
// If the UpgradeConfig doesn't support image or version based upgrade then fail validation.
// TODO: Remove (image and version) message once OSD-7609 is done.
if !supportsImageUpgrade(uC) && !supportsVersionUpgrade(uC) {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to validate .spec.desired in UpgradeConfig: Either (image and version) or (version and channel) should be specified",
}, nil
}
// Validate image spec reference
// Sample image spec: "quay.io/openshift-release-dev/ocp-release@sha256:8c3f5392ac933cd520b4dce560e007f2472d2d943de14c29cbbb40c72ae44e4c"
// Image spec structure: Registry/Namespace/Name@ID
image := uC.Spec.Desired.Image
if supportsImageUpgrade(uC) {
ref, err := imagereference.Parse(image)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image %s: must be a valid image pull spec:%v", image, err),
}, nil
}
if len(ref.Registry) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no registry specified", image),
}, nil
}
if len(ref.Namespace) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no repository specified", image),
}, nil
}
if len(ref.ID) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no image digest specified", image),
}, nil
}
}
// Validate desired version.
dv := uC.Spec.Desired.Version
if !empty(dv) {
version, err := cv.GetCurrentVersion(cV)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to get current cluster version during validation",
}, err
}
// Check for valid SemVer and convert to SemVer.
desiredVersion, err := semver.Parse(dv)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse desired version %s as semver", dv),
}, nil
}
currentVersion, err := semver.Parse(version)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse current version %s as semver", version),
}, nil
}
// Compare versions to ascertain if upgrade should proceed.
versionComparison, err := compareVersions(desiredVersion, currentVersion, logger)
if err != nil {
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: err.Error(),
}, nil
}
switch versionComparison {
case VersionUnknown:
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s and current version %s could not be compared.", desiredVersion, currentVersion),
}, nil
case VersionDowngrade:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Downgrades to desired version %s from %s are unsupported", desiredVersion, currentVersion),
}, nil
case VersionEqual:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s matches the current version %s", desiredVersion, currentVersion),
}, nil
case VersionUpgrade:
logger.Info(fmt.Sprintf("Desired version %s validated as greater than current version %s", desiredVersion, currentVersion))
}
}
desiredChannel := uC.Spec.Desired.Channel
if supportsVersionUpgrade(uC) {
// Validate available version is in Cincinnati.
clusterId, err := uuid.Parse(string(cV.Spec.ClusterID))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
upstreamURI, err := url.Parse(getUpstreamURL(cV))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
version, _ := cv.GetCurrentVersion(cV)
desiredVersion, _ := semver.Parse(dv)
currentVersion, _ := semver.Parse(version)
updates, err := cincinnati.NewClient(clusterId).GetUpdates(upstreamURI.String(), desiredChannel, currentVersion)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, err
}
var cvoUpdates []configv1.Update
for _, update := range updates {
cvoUpdates = append(cvoUpdates, configv1.Update{
Version: update.Version.String(),
Image: update.Image,
})
}
// Check whether the desired version exists in availableUpdates
found := false
for _, v := range cvoUpdates {
if v.Version == dv && !v.Force {
found = true
}
}
if !found {
logger.Info(fmt.Sprintf("Failed to find the desired version %s in channel %s", desiredVersion, desiredChannel))
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("cannot find version %s in available updates", desiredVersion),
}, nil
}
} else {
logger.Info("Skipping version validation from channel as image is used")
}
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: true,
Message: "UpgradeConfig is valid",
}, nil
}
// compareVersions accepts desiredVersion and currentVersion as semver versions and
// compares them. It returns an indication of whether the desired version constitutes
// a downgrade, no-op or upgrade, or an error if no valid comparison can occur
func compareVersions(dV semver.Version, cV semver.Version, logger logr.Logger) (VersionComparison, error) {
result := dV.Compare(cV)
switch result {
case -1:
logger.Info(fmt.Sprintf("%s is less than %s", dV, cV))
return VersionDowngrade, nil
case 0:
logger.Info(fmt.Sprintf("%s is equal to %s", dV, cV))
return VersionEqual, nil
case 1:
logger.Info(fmt.Sprintf("%s is greater than %s", dV, cV))
return VersionUpgrade, nil
default:
return VersionUnknown, fmt.Errorf("semver comparison failed for unknown reason. Versions %s & %s", dV, cV)
}
}
// getUpstreamURL retrieves the upstream URL from the ClusterVersion spec, falling back to the default upstream server if it is not set
func getUpstreamURL(cV *configv1.ClusterVersion) string {
upstream := string(cV.Spec.Upstream)
if len(upstream) == 0 {
upstream = defaultUpstreamServer
}
return upstream
}
// ValidationBuilder is an interface that enables ValidationBuilder implementations
//go:generate mockgen -destination=mocks/mockValidationBuilder.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation ValidationBuilder
type ValidationBuilder interface {
NewClient() (Validator, error)
}
// validationBuilder is an empty struct that enables instantiation of this type and its
// implemented interface.
type validationBuilder struct{}
// NewClient returns a Validator interface or an error if one occurs.
func (vb *validationBuilder) NewClient() (Validator, error) {
return &validator{}, nil
}
// supportsImageUpgrade function checks if the upgrade should proceed with image digest reference.
// TODO: In future, image should not be tied with version for validation. Refer Jira OSD-7609.
func supportsImageUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return !empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && empty(uc.Spec.Desired.Channel)
}
// supportsVersionUpgrade function checks if the upgrade should proceed with version from a channel.
func | (uc *upgradev1alpha1.UpgradeConfig) bool {
return empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && !empty(uc.Spec.Desired.Channel)
}
// empty function checks if a given string is empty or not.
func empty(s string) bool {
return strings.TrimSpace(s) == ""
}
| supportsVersionUpgrade | identifier_name |
validation.go | // Package validation provides UpgradeConfig CR validation tools.
package validation
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/blang/semver"
"github.com/go-logr/logr"
"github.com/google/uuid"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/cluster-version-operator/pkg/cincinnati"
imagereference "github.com/openshift/library-go/pkg/image/reference"
upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
cv "github.com/openshift/managed-upgrade-operator/pkg/clusterversion"
)
const (
defaultUpstreamServer = "https://api.openshift.com/api/upgrades_info/v1/graph"
)
// NewBuilder returns a validationBuilder object that implements the ValidationBuilder interface.
func NewBuilder() ValidationBuilder {
return &validationBuilder{}
}
// Validator knows how to validate UpgradeConfig CRs.
//go:generate mockgen -destination=mocks/mockValidation.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation Validator
type Validator interface {
IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error)
}
type validator struct{}
// ValidatorResult holds the result of validating an UpgradeConfig
type ValidatorResult struct {
// Indicates that the UpgradeConfig is semantically and syntactically valid
IsValid bool
// Indicates that the UpgradeConfig should be actioned to conduct an upgrade
IsAvailableUpdate bool
// A message associated with the validation result
Message string
}
// VersionComparison is an int used to compare versions
type VersionComparison int
const (
// VersionUnknown is of type VersionComparison and is used to indicate an unknown version comparison
VersionUnknown VersionComparison = iota - 2
// VersionDowngrade is of type VersionComparison and is used to indicate a version downgrade
VersionDowngrade
// VersionEqual is of type VersionComparison and is used to indicate the versions are equal
VersionEqual
// VersionUpgrade is of type VersionComparison and is used to indicate the version can be upgraded
VersionUpgrade
)
// IsValidUpgradeConfig checks the validity of UpgradeConfig CRs
func (v *validator) IsValidUpgradeConfig(uC *upgradev1alpha1.UpgradeConfig, cV *configv1.ClusterVersion, logger logr.Logger) (ValidatorResult, error) {
// Validate upgradeAt as RFC3339
upgradeAt := uC.Spec.UpgradeAt
_, err := time.Parse(time.RFC3339, upgradeAt)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse upgradeAt:%s during validation", upgradeAt),
}, nil
}
// Initial validation considering the usage for three optional fields for image, version and channel.
// If the UpgradeConfig doesn't support image or version based upgrade then fail validation.
// TODO: Remove (image and version) message once OSD-7609 is done.
if !supportsImageUpgrade(uC) && !supportsVersionUpgrade(uC) {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to validate .spec.desired in UpgradeConfig: Either (image and version) or (version and channel) should be specified",
}, nil
}
// Validate image spec reference
// Sample image spec: "quay.io/openshift-release-dev/ocp-release@sha256:8c3f5392ac933cd520b4dce560e007f2472d2d943de14c29cbbb40c72ae44e4c"
// Image spec structure: Registry/Namespace/Name@ID
image := uC.Spec.Desired.Image
if supportsImageUpgrade(uC) {
ref, err := imagereference.Parse(image)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image %s: must be a valid image pull spec:%v", image, err),
}, nil
}
if len(ref.Registry) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no registry specified", image),
}, nil
}
if len(ref.Namespace) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no repository specified", image),
}, nil
}
| if len(ref.ID) == 0 {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse image:%s must be a valid image pull spec: no image digest specified", image),
}, nil
}
}
// Validate desired version.
dv := uC.Spec.Desired.Version
if !empty(dv) {
version, err := cv.GetCurrentVersion(cV)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "Failed to get current cluster version during validation",
}, err
}
// Check for valid SemVer and convert to SemVer.
desiredVersion, err := semver.Parse(dv)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse desired version %s as semver", dv),
}, nil
}
currentVersion, err := semver.Parse(version)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Failed to parse current version %s as semver", version),
}, nil
}
// Compare versions to ascertain if upgrade should proceed.
versionComparison, err := compareVersions(desiredVersion, currentVersion, logger)
if err != nil {
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: err.Error(),
}, nil
}
switch versionComparison {
case VersionUnknown:
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s and current version %s could not be compared.", desiredVersion, currentVersion),
}, nil
case VersionDowngrade:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Downgrades to desired version %s from %s are unsupported", desiredVersion, currentVersion),
}, nil
case VersionEqual:
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: false,
Message: fmt.Sprintf("Desired version %s matches the current version %s", desiredVersion, currentVersion),
}, nil
case VersionUpgrade:
logger.Info(fmt.Sprintf("Desired version %s validated as greater than current version %s", desiredVersion, currentVersion))
}
}
desiredChannel := uC.Spec.Desired.Channel
if supportsVersionUpgrade(uC) {
// Validate that the desired version is available in Cincinnati.
clusterId, err := uuid.Parse(string(cV.Spec.ClusterID))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
upstreamURI, err := url.Parse(getUpstreamURL(cV))
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, nil
}
version, _ := cv.GetCurrentVersion(cV)
desiredVersion, _ := semver.Parse(dv)
currentVersion, _ := semver.Parse(version)
updates, err := cincinnati.NewClient(clusterId).GetUpdates(upstreamURI.String(), desiredChannel, currentVersion)
if err != nil {
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: "",
}, err
}
var cvoUpdates []configv1.Update
for _, update := range updates {
cvoUpdates = append(cvoUpdates, configv1.Update{
Version: update.Version.String(),
Image: update.Image,
})
}
// Check whether the desired version exists in availableUpdates
found := false
for _, v := range cvoUpdates {
if v.Version == dv && !v.Force {
found = true
}
}
if !found {
logger.Info(fmt.Sprintf("Failed to find the desired version %s in channel %s", desiredVersion, desiredChannel))
return ValidatorResult{
IsValid: false,
IsAvailableUpdate: false,
Message: fmt.Sprintf("cannot find version %s in available updates", desiredVersion),
}, nil
}
} else {
logger.Info("Skipping version validation from channel as image is used")
}
return ValidatorResult{
IsValid: true,
IsAvailableUpdate: true,
Message: "UpgradeConfig is valid",
}, nil
}
// compareVersions accepts desiredVersion and currentVersion as semver versions and
// compares them. It returns an indication of whether the desired version constitutes
// a downgrade, no-op or upgrade, or an error if no valid comparison can occur
func compareVersions(dV semver.Version, cV semver.Version, logger logr.Logger) (VersionComparison, error) {
result := dV.Compare(cV)
switch result {
case -1:
logger.Info(fmt.Sprintf("%s is less than %s", dV, cV))
return VersionDowngrade, nil
case 0:
logger.Info(fmt.Sprintf("%s is equal to %s", dV, cV))
return VersionEqual, nil
case 1:
logger.Info(fmt.Sprintf("%s is greater than %s", dV, cV))
return VersionUpgrade, nil
default:
return VersionUnknown, fmt.Errorf("semver comparison failed for unknown reason. Versions %s & %s", dV, cV)
}
}
// getUpstreamURL retrieves the upstream URL from the ClusterVersion spec, falling back to the default upstream server if it is not set
func getUpstreamURL(cV *configv1.ClusterVersion) string {
upstream := string(cV.Spec.Upstream)
if len(upstream) == 0 {
upstream = defaultUpstreamServer
}
return upstream
}
// ValidationBuilder is an interface that enables ValidationBuilder implementations
//go:generate mockgen -destination=mocks/mockValidationBuilder.go -package=mocks github.com/openshift/managed-upgrade-operator/pkg/validation ValidationBuilder
type ValidationBuilder interface {
NewClient() (Validator, error)
}
// validationBuilder is an empty struct that enables instantiation of this type and its
// implemented interface.
type validationBuilder struct{}
// NewClient returns a Validator interface or an error if one occurs.
func (vb *validationBuilder) NewClient() (Validator, error) {
return &validator{}, nil
}
// supportsImageUpgrade function checks if the upgrade should proceed with image digest reference.
// TODO: In future, image should not be tied with version for validation. Refer Jira OSD-7609.
func supportsImageUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return !empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && empty(uc.Spec.Desired.Channel)
}
// supportsVersionUpgrade function checks if the upgrade should proceed with version from a channel.
func supportsVersionUpgrade(uc *upgradev1alpha1.UpgradeConfig) bool {
return empty(uc.Spec.Desired.Image) && !empty(uc.Spec.Desired.Version) && !empty(uc.Spec.Desired.Channel)
}
// empty function checks if a given string is empty or not.
func empty(s string) bool {
return strings.TrimSpace(s) == ""
} | random_line_split |
|
ecc.py | """This module deals with Elliptic Curve Operations:
keys, signing
"""
import base64
import binascii
import ecdsa
import hashlib
import hmac
import pyaes
from .hash import sha256d
from .serialize import write_compact_size
def msg_magic(message):
return b"\x18Bitcoin Signed Message:\n" + write_compact_size(len(message)) + message
def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:
"""Compute a public key from a private key.
The private key must be 32 bytes long.
Uncompressed public keys are 65 bytes long:
0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
Compressed keys are 33 bytes long:
<sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
"""
key = ECKey(privkey)
return key.get_public_key(compressed)
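# Added illustrative sketch (not part of the original module): how the two public
# key encodings relate for one private key. The hex value below is an arbitrary
# throwaway example, not a real secret.
def _demo_public_key_encodings():
    demo_priv = bytes.fromhex('11' * 32)
    compressed = public_key_from_private_key(demo_priv, compressed=True)
    uncompressed = public_key_from_private_key(demo_priv, compressed=False)
    assert len(compressed) == 33 and compressed[0] in (2, 3)
    assert len(uncompressed) == 65 and uncompressed[0] == 4
    return compressed, uncompressed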
# from http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return p
elif p % 4 == 3:
return pow(a, (p + 1) // 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s //= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) // 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in range(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
def | (a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) // 2, p)
return -1 if ls == p - 1 else ls
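# Added worked example (illustrative only): 2 is a quadratic residue mod 7 because
# 3*3 = 9 = 2 (mod 7), so the Legendre symbol is 1 and modular_sqrt finds a root
# (here 4, with 7 - 4 = 3 being the other root).
def _demo_modular_sqrt():
    assert legendre_symbol(2, 7) == 1
    root = modular_sqrt(2, 7)
    assert (root * root) % 7 == 2
    assert ((7 - root) * (7 - root)) % 7 == 2
    return root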
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = ecdsa.curves.SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = ecdsa.util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = (x * x * x + curveFp.a() * x + curveFp.b()) % curveFp.p()
beta = modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = ecdsa.ellipticcurve.Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = int(h.hex(), 16)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = ecdsa.numbertheory.inverse_mod(r, order)
Q = inv_r * (s * R + minus_e * G)
return klass.from_public_point(Q, curve)
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(
sig[1:], recid, h, curve=ecdsa.curves.SECP256k1), compressed
def number_to_string(num: int, order: int) -> bytes:
l = ecdsa.util.orderlen(order)
fmt_str = "%0" + str(2 * l) + "x"
string = binascii.unhexlify((fmt_str % num).encode())
assert len(string) == l, (len(string), l)
return string
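# Added illustrative sketch (not part of the original module): number_to_string
# zero-pads to the byte length of the curve order, so even a tiny scalar
# serializes to 32 bytes for secp256k1.
def _demo_number_to_string():
    order = ecdsa.ecdsa.generator_secp256k1.order()
    encoded = number_to_string(7, order)
    assert len(encoded) == 32 and encoded[-1] == 7
    return encoded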
class InvalidPadding(Exception):
pass
class InvalidPassword(Exception):
def __str__(self):
return "Incorrect password"
def append_PKCS7_padding(data: bytes) -> bytes:
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data: bytes) -> bytes:
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key: bytes, iv: bytes, data: bytes):
data = append_PKCS7_padding(data)
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key: bytes, iv: bytes, data: bytes) -> bytes:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
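# Added illustrative sketch: round-tripping a short message through the PKCS7 +
# AES-128-CBC helpers above. The key and IV are fixed dummy values for the example
# only; real callers derive them from an ECDH shared secret as in ECKey below.
def _demo_aes_round_trip():
    demo_key, demo_iv = b'\x00' * 16, b'\x01' * 16
    ciphertext = aes_encrypt_with_iv(demo_key, demo_iv, b'attack at dawn')
    assert aes_decrypt_with_iv(demo_key, demo_iv, ciphertext) == b'attack at dawn'
    return ciphertext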
class ECKey(object):
def __init__(self, k: bytes):
assert len(k) == 32
secret = int(k.hex(), 16)
self.pubkey = ecdsa.ecdsa.Public_key(
ecdsa.ecdsa.generator_secp256k1,
ecdsa.ecdsa.generator_secp256k1 * secret)
self.privkey = ecdsa.ecdsa.Private_key(self.pubkey, secret)
self.secret = secret
def get_public_key(self, compressed: bool) -> bytes:
if compressed:
if self.pubkey.point.y() & 1:
key = '03' + '%064x' % self.pubkey.point.x()
else:
key = '02' + '%064x' % self.pubkey.point.x()
else:
key = '04' + \
'%064x' % self.pubkey.point.x() + \
'%064x' % self.pubkey.point.y()
return bytes.fromhex(key)
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(
self.secret, curve=ecdsa.curves.SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(
msg_hash, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string)
assert public_key.verify_digest(
signature, msg_hash, sigdecode=ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message: bytes, is_compressed: bool):
signature = self.sign(sha256d(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message: bytes):
h = sha256d(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode=ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods;
# AES-128-CBC with PKCS7 is used as the cipher;
# hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message: bytes, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(
ecdsa.util.randrange(pow(2, 256)),
ecdsa.ecdsa.generator_secp256k1.order())
ephemeral = ECKey(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True)
encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != b'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1,
ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
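# Added illustrative sketch (not part of the original module): an ECIES round trip
# using the class above. The private key is an arbitrary example value;
# encrypt_message only needs the recipient's public key.
def _demo_ecies_round_trip():
    recipient = ECKey(bytes.fromhex('22' * 32))
    pubkey = recipient.get_public_key(compressed=True)
    blob = ECKey.encrypt_message(b'hello', pubkey)
    assert recipient.decrypt_message(blob) == b'hello'
    return blob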
def ECC_YfromX(x, odd=True):
_p = ecdsa.ecdsa.curve_secp256k1.p()
_a = ecdsa.ecdsa.curve_secp256k1.a()
_b = ecdsa.ecdsa.curve_secp256k1.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p + 1) // 4, _p)
if ecdsa.ecdsa.curve_secp256k1.contains_point(Mx, My):
if odd == bool(My & 1):
return [My, offset]
return [_p - My, offset]
raise Exception('ECC_YfromX: No Y found')
def ser_to_point(Aser) -> ecdsa.ellipticcurve.Point:
curve = ecdsa.ecdsa.curve_secp256k1
generator = ecdsa.ecdsa.generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return ecdsa.ellipticcurve.Point(
curve, ecdsa.util.string_to_number(Aser[1:33]),
ecdsa.util.string_to_number(Aser[33:]), _r)
Mx = ecdsa.util.string_to_number(Aser[1:])
My = ECC_YfromX(Mx, Aser[0] == 0x03)[0]
return ecdsa.ellipticcurve.Point(curve, Mx, My, _r)
def point_to_ser(P: ecdsa.ellipticcurve.Point, comp: bool = True) -> bytes:
if comp:
return bytes.fromhex(('%02x' % (2 + (P.y() & 1))) + ('%064x' % P.x()))
return bytes.fromhex('04'+('%064x' % P.x()) + ('%064x' % P.y())) | legendre_symbol | identifier_name |
ecc.py | """This module deals with Elliptic Curve Operations:
keys, signing
"""
import base64
import binascii
import ecdsa
import hashlib
import hmac
import pyaes
from .hash import sha256d
from .serialize import write_compact_size
def msg_magic(message):
return b"\x18Bitcoin Signed Message:\n" + write_compact_size(len(message)) + message
def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:
"""Compute a public key from a private key.
The private key must be 32 bytes long.
Uncompressed public keys are 65 bytes long:
0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
Compressed keys are 33 bytes long:
<sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
"""
key = ECKey(privkey)
return key.get_public_key(compressed)
# from http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return p
elif p % 4 == 3:
return pow(a, (p + 1) // 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s //= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) // 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in range(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) // 2, p)
return -1 if ls == p - 1 else ls
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = ecdsa.curves.SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = ecdsa.util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = (x * x * x + curveFp.a() * x + curveFp.b()) % curveFp.p()
beta = modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = ecdsa.ellipticcurve.Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = int(h.hex(), 16)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = ecdsa.numbertheory.inverse_mod(r, order)
Q = inv_r * (s * R + minus_e * G)
return klass.from_public_point(Q, curve)
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(
sig[1:], recid, h, curve=ecdsa.curves.SECP256k1), compressed
def number_to_string(num: int, order: int) -> bytes:
l = ecdsa.util.orderlen(order)
fmt_str = "%0" + str(2 * l) + "x"
string = binascii.unhexlify((fmt_str % num).encode())
assert len(string) == l, (len(string), l)
return string
class InvalidPadding(Exception):
pass
class InvalidPassword(Exception):
def __str__(self):
|
def append_PKCS7_padding(data: bytes) -> bytes:
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data: bytes) -> bytes:
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key: bytes, iv: bytes, data: bytes):
data = append_PKCS7_padding(data)
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key: bytes, iv: bytes, data: bytes) -> bytes:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
class ECKey(object):
def __init__(self, k: bytes):
assert len(k) == 32
secret = int(k.hex(), 16)
self.pubkey = ecdsa.ecdsa.Public_key(
ecdsa.ecdsa.generator_secp256k1,
ecdsa.ecdsa.generator_secp256k1 * secret)
self.privkey = ecdsa.ecdsa.Private_key(self.pubkey, secret)
self.secret = secret
def get_public_key(self, compressed: bool) -> bytes:
if compressed:
if self.pubkey.point.y() & 1:
key = '03' + '%064x' % self.pubkey.point.x()
else:
key = '02' + '%064x' % self.pubkey.point.x()
else:
key = '04' + \
'%064x' % self.pubkey.point.x() + \
'%064x' % self.pubkey.point.y()
return bytes.fromhex(key)
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(
self.secret, curve=ecdsa.curves.SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(
msg_hash, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string)
assert public_key.verify_digest(
signature, msg_hash, sigdecode=ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message: bytes, is_compressed: bool):
signature = self.sign(sha256d(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message: bytes):
h = sha256d(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode=ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods;
# AES-128-CBC with PKCS7 is used as the cipher;
# hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message: bytes, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(
ecdsa.util.randrange(pow(2, 256)),
ecdsa.ecdsa.generator_secp256k1.order())
ephemeral = ECKey(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True)
encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != b'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1,
ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
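# Added illustrative sketch (not part of the original module): signing a
# Bitcoin-style message and checking it with verify_message, which raises on
# failure. The private key is an arbitrary example value.
def _demo_sign_and_verify():
    signer = ECKey(bytes.fromhex('33' * 32))
    sig = signer.sign_message(b'test message', is_compressed=True)
    signer.verify_message(sig, b'test message')
    return sig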
def ECC_YfromX(x, odd=True):
_p = ecdsa.ecdsa.curve_secp256k1.p()
_a = ecdsa.ecdsa.curve_secp256k1.a()
_b = ecdsa.ecdsa.curve_secp256k1.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p + 1) // 4, _p)
if ecdsa.ecdsa.curve_secp256k1.contains_point(Mx, My):
if odd == bool(My & 1):
return [My, offset]
return [_p - My, offset]
raise Exception('ECC_YfromX: No Y found')
def ser_to_point(Aser) -> ecdsa.ellipticcurve.Point:
curve = ecdsa.ecdsa.curve_secp256k1
generator = ecdsa.ecdsa.generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return ecdsa.ellipticcurve.Point(
curve, ecdsa.util.string_to_number(Aser[1:33]),
ecdsa.util.string_to_number(Aser[33:]), _r)
Mx = ecdsa.util.string_to_number(Aser[1:])
My = ECC_YfromX(Mx, Aser[0] == 0x03)[0]
return ecdsa.ellipticcurve.Point(curve, Mx, My, _r)
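# Added illustrative sketch (not part of the original module): serializing the
# secp256k1 generator point to its 33-byte compressed form and parsing it back
# with the helpers in this module.
def _demo_point_serialization():
    G = ecdsa.ecdsa.generator_secp256k1
    P = ecdsa.ellipticcurve.Point(
        ecdsa.ecdsa.curve_secp256k1, G.x(), G.y(), G.order())
    ser = point_to_ser(P, comp=True)
    parsed = ser_to_point(ser)
    assert (parsed.x(), parsed.y()) == (G.x(), G.y())
    return ser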
def point_to_ser(P: ecdsa.ellipticcurve.Point, comp: bool = True) -> bytes:
if comp:
return bytes.fromhex(('%02x' % (2 + (P.y() & 1))) + ('%064x' % P.x()))
return bytes.fromhex('04'+('%064x' % P.x()) + ('%064x' % P.y())) | return "Incorrect password" | identifier_body |
ecc.py | """This module deals with Elliptic Curve Operations:
keys, signing
"""
import base64
import binascii
import ecdsa
import hashlib
import hmac
import pyaes
from .hash import sha256d
from .serialize import write_compact_size
def msg_magic(message):
return b"\x18Bitcoin Signed Message:\n" + write_compact_size(len(message)) + message
def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:
"""Compute a public key from a private key.
The private key must be 32 bytes long.
Uncompressed public keys are 65 bytes long:
0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
Compressed keys are 33 bytes long:
<sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
"""
key = ECKey(privkey)
return key.get_public_key(compressed)
# from http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return p
elif p % 4 == 3:
return pow(a, (p + 1) // 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s //= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) // 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in range(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) // 2, p)
return -1 if ls == p - 1 else ls
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = ecdsa.curves.SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = ecdsa.util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = (x * x * x + curveFp.a() * x + curveFp.b()) % curveFp.p()
beta = modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = ecdsa.ellipticcurve.Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = int(h.hex(), 16)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = ecdsa.numbertheory.inverse_mod(r, order)
Q = inv_r * (s * R + minus_e * G)
return klass.from_public_point(Q, curve)
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(
sig[1:], recid, h, curve=ecdsa.curves.SECP256k1), compressed
def number_to_string(num: int, order: int) -> bytes:
l = ecdsa.util.orderlen(order)
fmt_str = "%0" + str(2 * l) + "x"
string = binascii.unhexlify((fmt_str % num).encode())
assert len(string) == l, (len(string), l)
return string
class InvalidPadding(Exception):
pass
class InvalidPassword(Exception):
def __str__(self):
return "Incorrect password"
def append_PKCS7_padding(data: bytes) -> bytes:
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data: bytes) -> bytes:
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key: bytes, iv: bytes, data: bytes):
data = append_PKCS7_padding(data)
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key: bytes, iv: bytes, data: bytes) -> bytes:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
class ECKey(object):
def __init__(self, k: bytes):
assert len(k) == 32
secret = int(k.hex(), 16) | ecdsa.ecdsa.generator_secp256k1 * secret)
self.privkey = ecdsa.ecdsa.Private_key(self.pubkey, secret)
self.secret = secret
def get_public_key(self, compressed: bool) -> bytes:
if compressed:
if self.pubkey.point.y() & 1:
key = '03' + '%064x' % self.pubkey.point.x()
else:
key = '02' + '%064x' % self.pubkey.point.x()
else:
key = '04' + \
'%064x' % self.pubkey.point.x() + \
'%064x' % self.pubkey.point.y()
return bytes.fromhex(key)
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(
self.secret, curve=ecdsa.curves.SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(
msg_hash, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string)
assert public_key.verify_digest(
signature, msg_hash, sigdecode=ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message: bytes, is_compressed: bool):
signature = self.sign(sha256d(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message: bytes):
h = sha256d(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode=ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods;
# AES-128-CBC with PKCS7 is used as the cipher;
# hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message: bytes, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(
ecdsa.util.randrange(pow(2, 256)),
ecdsa.ecdsa.generator_secp256k1.order())
ephemeral = ECKey(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True)
encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != b'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1,
ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
def ECC_YfromX(x, odd=True):
_p = ecdsa.ecdsa.curve_secp256k1.p()
_a = ecdsa.ecdsa.curve_secp256k1.a()
_b = ecdsa.ecdsa.curve_secp256k1.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p + 1) // 4, _p)
if ecdsa.ecdsa.curve_secp256k1.contains_point(Mx, My):
if odd == bool(My & 1):
return [My, offset]
return [_p - My, offset]
raise Exception('ECC_YfromX: No Y found')
def ser_to_point(Aser) -> ecdsa.ellipticcurve.Point:
curve = ecdsa.ecdsa.curve_secp256k1
generator = ecdsa.ecdsa.generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return ecdsa.ellipticcurve.Point(
curve, ecdsa.util.string_to_number(Aser[1:33]),
ecdsa.util.string_to_number(Aser[33:]), _r)
Mx = ecdsa.util.string_to_number(Aser[1:])
My = ECC_YfromX(Mx, Aser[0] == 0x03)[0]
return ecdsa.ellipticcurve.Point(curve, Mx, My, _r)
def point_to_ser(P: ecdsa.ellipticcurve.Point, comp: bool = True) -> bytes:
if comp:
return bytes.fromhex(('%02x' % (2 + (P.y() & 1))) + ('%064x' % P.x()))
return bytes.fromhex('04'+('%064x' % P.x()) + ('%064x' % P.y())) | self.pubkey = ecdsa.ecdsa.Public_key(
ecdsa.ecdsa.generator_secp256k1, | random_line_split |
ecc.py | """This module deals with Elliptic Curve Operations:
keys, signing
"""
import base64
import binascii
import ecdsa
import hashlib
import hmac
import pyaes
from .hash import sha256d
from .serialize import write_compact_size
def msg_magic(message):
return b"\x18Bitcoin Signed Message:\n" + write_compact_size(len(message)) + message
def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:
"""Compute a public key from a private key.
The private key must be 32 bytes long.
Uncompressed public keys are 65 bytes long:
0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
Compressed keys are 33 bytes long:
<sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
"""
key = ECKey(privkey)
return key.get_public_key(compressed)
# from http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return p
elif p % 4 == 3:
return pow(a, (p + 1) // 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s //= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) // 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in range(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) // 2, p)
return -1 if ls == p - 1 else ls
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = ecdsa.curves.SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = ecdsa.util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = (x * x * x + curveFp.a() * x + curveFp.b()) % curveFp.p()
beta = modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = ecdsa.ellipticcurve.Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = int(h.hex(), 16)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = ecdsa.numbertheory.inverse_mod(r, order)
Q = inv_r * (s * R + minus_e * G)
return klass.from_public_point(Q, curve)
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(
sig[1:], recid, h, curve=ecdsa.curves.SECP256k1), compressed
def number_to_string(num: int, order: int) -> bytes:
l = ecdsa.util.orderlen(order)
fmt_str = "%0" + str(2 * l) + "x"
string = binascii.unhexlify((fmt_str % num).encode())
assert len(string) == l, (len(string), l)
return string
class InvalidPadding(Exception):
pass
class InvalidPassword(Exception):
def __str__(self):
return "Incorrect password"
def append_PKCS7_padding(data: bytes) -> bytes:
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data: bytes) -> bytes:
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key: bytes, iv: bytes, data: bytes):
data = append_PKCS7_padding(data)
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key: bytes, iv: bytes, data: bytes) -> bytes:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
class ECKey(object):
def __init__(self, k: bytes):
assert len(k) == 32
secret = int(k.hex(), 16)
self.pubkey = ecdsa.ecdsa.Public_key(
ecdsa.ecdsa.generator_secp256k1,
ecdsa.ecdsa.generator_secp256k1 * secret)
self.privkey = ecdsa.ecdsa.Private_key(self.pubkey, secret)
self.secret = secret
def get_public_key(self, compressed: bool) -> bytes:
if compressed:
if self.pubkey.point.y() & 1:
key = '03' + '%064x' % self.pubkey.point.x()
else:
key = '02' + '%064x' % self.pubkey.point.x()
else:
key = '04' + \
'%064x' % self.pubkey.point.x() + \
'%064x' % self.pubkey.point.y()
return bytes.fromhex(key)
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(
self.secret, curve=ecdsa.curves.SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(
msg_hash, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string)
assert public_key.verify_digest(
signature, msg_hash, sigdecode=ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message: bytes, is_compressed: bool):
signature = self.sign(sha256d(msg_magic(message)))
for i in range(4):
|
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message: bytes):
h = sha256d(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode=ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods;
# AES-128-CBC with PKCS7 is used as the cipher;
# hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message: bytes, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(
ecdsa.util.randrange(pow(2, 256)),
ecdsa.ecdsa.generator_secp256k1.order())
ephemeral = ECKey(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True)
encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != b'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(
ecdsa.ecdsa.generator_secp256k1,
ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
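# Added illustrative sketch (not part of the original module): recovering the
# signer's public key from a signed message, mirroring what verify_message does
# internally. The private key is an arbitrary example value.
def _demo_recover_pubkey():
    signer = ECKey(bytes.fromhex('44' * 32))
    message = b'recover me'
    sig = signer.sign_message(message, is_compressed=True)
    recovered, compressed = pubkey_from_signature(sig, sha256d(msg_magic(message)))
    assert point_to_ser(recovered.pubkey.point, compressed) == signer.get_public_key(compressed)
    return recovered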
def ECC_YfromX(x, odd=True):
_p = ecdsa.ecdsa.curve_secp256k1.p()
_a = ecdsa.ecdsa.curve_secp256k1.a()
_b = ecdsa.ecdsa.curve_secp256k1.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p + 1) // 4, _p)
if ecdsa.ecdsa.curve_secp256k1.contains_point(Mx, My):
if odd == bool(My & 1):
return [My, offset]
return [_p - My, offset]
raise Exception('ECC_YfromX: No Y found')
def ser_to_point(Aser) -> ecdsa.ellipticcurve.Point:
curve = ecdsa.ecdsa.curve_secp256k1
generator = ecdsa.ecdsa.generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return ecdsa.ellipticcurve.Point(
curve, ecdsa.util.string_to_number(Aser[1:33]),
ecdsa.util.string_to_number(Aser[33:]), _r)
Mx = ecdsa.util.string_to_number(Aser[1:])
My = ECC_YfromX(Mx, Aser[0] == 0x03)[0]
return ecdsa.ellipticcurve.Point(curve, Mx, My, _r)
def point_to_ser(P: ecdsa.ellipticcurve.Point, comp: bool = True) -> bytes:
if comp:
return bytes.fromhex(('%02x' % (2 + (P.y() & 1))) + ('%064x' % P.x()))
return bytes.fromhex('04'+('%064x' % P.x()) + ('%064x' % P.y())) | sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue | conditional_block |
model_param_old.py | import venture.shortcuts as s
from utils import *
from venture.unit import VentureUnit
from venture.ripl.ripl import _strip_types
num_features = 4
def loadFeatures(dataset, name, years, days,maxDay=None):
features_file = "data/input/dataset%d/%s-features.csv" % (dataset, name)
print "Loading features from %s" % features_file
features = readFeatures(features_file, maxYear=max(years)+1,maxDay=maxDay)
for (y, d, i, j) in features.keys():
if y not in years:# or d not in days:
del features[(y, d, i, j)]
return toVenture(features)
def loadObservations(ripl, dataset, name, years, days):
observations_file = "data/input/dataset%d/%s-observations.csv" % (dataset, name)
observations = readObservations(observations_file)
for y in years:
for (d, ns) in observations[y]:
if d not in days: continue
for i, n in enumerate(ns):
#print y, d, i
ripl.observe('(observe_birds %d %d %d)' % (y, d, i), n)
class OneBird(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.cells = params['cells']
self.years = params['years']
self.days = params['days']
self.features = loadFeatures(1, self.name, self.years, self.days)
super(OneBird, self).__init__(ripl, params)
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
# we want to infer the hyperparameters of a log-linear model
ripl.assume('scale', '(scope_include (quote hypers) (quote scale) (gamma 1 1))')
for k in range(num_features):
ripl.assume('hypers%d' % k, '(scope_include (quote hypers) %d (* scale (normal 0 10)))' % k)
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
# phi is the unnormalized probability of a bird moving
# from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(let ((fs (lookup features (array y d i j))))
(exp %s))))"""
% fold('+', '(* hypers_k_ (lookup fs _k_))', '_k_', num_features))
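# (Added note) `fold` is imported from utils and is not shown here; judging by its
# use, it presumably expands a template into an n-ary Venture expression by
# substituting the placeholder with 0..n-1, e.g. (assumed behaviour):
# fold('+', '(* hypers_k_ (lookup fs _k_))', '_k_', 4)
# -> '(+ (* hypers0 (lookup fs 0)) ... (* hypers3 (lookup fs 3)))'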
ripl.assume('get_bird_move_dist',
'(mem (lambda (y d i) ' +
fold('simplex', '(phi y d i j)', 'j', self.cells) +
'))')
ripl.assume('cell_array', fold('array', 'j', 'j', self.cells))
# samples where a bird would move to from cell i on day d
# the (year, day) pair is used as the block id within the move scope

ripl.assume('move', """
(lambda (y d i)
(let ((dist (get_bird_move_dist y d i)))
(scope_include (quote move) (array y d)
(categorical dist cell_array))))""")
ripl.assume('get_bird_pos', """
(mem (lambda (y d)
(if (= d 0) 0
(move y (- d 1) (get_bird_pos y (- d 1))))))""")
ripl.assume('count_birds', """
(lambda (y d i)
(if (= (get_bird_pos y d) i)
1 0))""")
ripl.assume('observe_birds', '(lambda (y d i) (poisson (+ (count_birds y d i) 0.0001)))')
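# (Added note) the 0.0001 keeps the Poisson rate strictly positive, so an observed
# nonzero count at a cell the bird did not occupy still has nonzero likelihood.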
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
observations_file = "data/input/dataset%d/%s-observations.csv" % (1, self.name)
observations = readObservations(observations_file)
self.unconstrained = []
for y in self.years:
for (d, ns) in observations[y]:
if d not in self.days: continue
if d == 0: continue
loc = None
for i, n in enumerate(ns):
if n > 0:
loc = i
break
if loc is None:
self.unconstrained.append((y, d-1))
#ripl.predict('(get_bird_pos %d %d)' % (y, d))
else:
ripl.observe('(get_bird_pos %d %d)' % (y, d), loc)
def inferMove(self, ripl = None):
if ripl is None:
ripl = self.ripl
for block in self.unconstrained:
ripl.infer({'kernel': 'gibbs', 'scope': 'move', 'block': block, 'transitions': 1})
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
class Poisson(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.width = params['width']
self.height = params['height']
self.cells = params['cells']
self.dataset = params['dataset']
self.total_birds = params['total_birds']
self.years = params['years']
self.days = params['days']
self.hypers = params["hypers"]
self.learnHypers = True if isinstance(self.hypers[0],str) else False
self.ground = readReconstruction(params)
if 'maxDay' in params:
self.maxDay = params["maxDay"]
self.features = loadFeatures(self.dataset, self.name, self.years, self.days,
maxDay=self.maxDay)
else:
self.features = loadFeatures(self.dataset, self.name, self.years, self.days)
val_features = self.features['value']
self.parsedFeatures = {k:_strip_types(v) for k,v in val_features.items() }
super(Poisson, self).__init__(ripl, params)
def feat_i(self, y, d, i, feat=2):
'Input *feat in range(3) (default=wind), return all values i,j for fixed i'
return [self.parsedFeatures[(y,d,i,j)][feat] for j in range(100)]
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
ripl.assume('total_birds', self.total_birds)
ripl.assume('cells', self.cells)
#ripl.assume('num_features', num_features)
# we want to infer the hyperparameters of a log-linear model
if not self.learnHypers:
for k, b in enumerate(self.hypers):
ripl.assume('hypers%d' % k, b)
else:
for k, prior in enumerate(self.hypers):
ripl.assume('hypers%d' % k,'(scope_include (quote hypers) 0 %s )'%prior)
#ripl.assume('hypers%d' % k,'(scope_include (quote hypers) %d %s )'%(k,prior) )
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
ripl.assume('width', self.width)
ripl.assume('height', self.height)
ripl.assume('max_dist2', '18')
ripl.assume('cell2X', '(lambda (cell) (int_div cell height))')
ripl.assume('cell2Y', '(lambda (cell) (int_mod cell height))')
#ripl.assume('cell2P', '(lambda (cell) (make_pair (cell2X cell) (cell2Y cell)))')
ripl.assume('XY2cell', '(lambda (x y) (+ (* height x) y))')
ripl.assume('square', '(lambda (x) (* x x))')
ripl.assume('dist2', """
(lambda (x1 y1 x2 y2)
(+ (square (- x1 x2)) (square (- y1 y2))))""")
ripl.assume('cell_dist2', """
(lambda (i j)
(dist2
(cell2X i) (cell2Y i)
(cell2X j) (cell2Y j)))""")
# phi is the unnormalized probability of a bird moving from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""
% fold('+', '(* hypers__k (lookup fs __k))', '__k', num_features))
ripl.assume('phi2', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""%'blah')
ripl.assume('get_bird_move_dist', """
(lambda (y d i)
(lambda (j)
(phi y d i j)))""")
ripl.assume('foldl', """
(lambda (op x min max f)
(if (= min max) x
(foldl op (op x (f min)) (+ min 1) max f)))""")
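# (Added note) foldl above is a left fold of op over f(min) .. f(max-1); for example
# (foldl + 0 0 3 f) evaluates to (+ (+ (+ 0 (f 0)) (f 1)) (f 2)).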
ripl.assume('multinomial_func', """
(lambda (n min max f)
(let ((normalize (foldl + 0 min max f)))
(mem (lambda (i)
(poisson (* n (/ (f i) normalize)))))))""")
ripl.assume('count_birds', """
(mem (lambda (y d i)
(if (= d 0) (if (= i 0) total_birds 0)""" +
fold('+', '(get_birds_moving y (- d 1) __j i)', '__j', self.cells) + ")))")
# bird_movements_loc
# if no birds at i, no movement to any j from i
# normalize is the normalizing constant for the move probabilities out of cell i
# n is the product of the bird count at cell i and the normalized probability of moving from i to j
# returns a lambda that takes j and returns a Poisson draw with mean n
## [NOTE: strings are bad, need library functions, need sq bracks for let]
ripl.assume('bird_movements_loc', """
(mem (lambda (y d i)
(if (= (count_birds y d i) 0)
(lambda (j) 0)
(let ((normalize (foldl + 0 0 cells (lambda (j) (phi y d i j)))))
(mem (lambda (j)
(if (= (phi y d i j) 0) 0
(let ((n (* (count_birds y d i) (/ (phi y d i j) normalize))))
(scope_include d (array y d i j)
(poisson n))))))))))""")
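# (Added worked example, hypothetical numbers) if (count_birds y d i) is 10 and phi
# gives unnormalised weights 1, 3 and 6 for the reachable cells, then normalize = 10
# and the move count into the last cell is drawn as (poisson (* 10 0.6)), i.e. a
# Poisson draw with mean 6.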
#ripl.assume('bird_movements', '(mem (lambda (y d) %s))' % fold('array', '(bird_movements_loc y d __i)', '__i', self.cells))
ripl.assume('observe_birds', '(mem (lambda (y d i) (poisson (+ (count_birds y d i) 0.0001))))')
# returns the number of birds moving from cell i to cell j (we want to force this value)
ripl.assume('get_birds_moving', """
(lambda (y d i j)
((bird_movements_loc y d i) j))""")
ripl.assume('get_birds_moving1', '(lambda (y d i) %s)' % fold('array', '(get_birds_moving y d i __j)', '__j', self.cells))
ripl.assume('get_birds_moving2', '(lambda (y d) %s)' % fold('array', '(get_birds_moving1 y d __i)', '__i', self.cells))
ripl.assume('get_birds_moving3', '(lambda (d) %s)' % fold('array', '(get_birds_moving2 __y d)', '__y', len(self.years)))
ripl.assume('get_birds_moving4', '(lambda () %s)' % fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1))
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading observations"
loadObservations(ripl, self.dataset, self.name, self.years, self.days)
def loadModel(self, ripl = None):
if ripl is None:
ripl = self.ripl
self.loadAssumes(ripl)
self.loadObserves(ripl)
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
def updateObserves(self, d):
self.days.append(d)
#if d > 0: self.ripl.forget('bird_moves')
loadObservations(self.ripl, self.dataset, self.name, self.years, [d])
self.ripl.infer('(incorporate)')
#self.ripl.predict(fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1), label='bird_moves')
def getBirdLocations(self, years=None, days=None):
if years is None: years = self.years
if days is None: days = self.days
bird_locations = {}
for y in years:
bird_locations[y] = {}
for d in days:
bird_locations[y][d] = [self.ripl.sample('(count_birds %d %d %d)' % (y, d, i)) for i in range(self.cells)]
return bird_locations
def drawBirdLocations(self):
bird_locs = self.getBirdLocations()
for y in self.years:
path = 'bird_moves%d/%d/' % (self.dataset, y)
ensure(path)
for d in self.days:
drawBirds(bird_locs[y][d], path + '%02d.png' % d, **self.parameters)
def getBirdMoves(self):
bird_moves = {}
for d in self.days[:-1]:
bird_moves_raw = self.ripl.sample('(get_birds_moving3 %d)' % d)
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
bird_moves[(y, d, i, j)] = bird_moves_raw[y][i][j]
return bird_moves
def forceBirdMoves(self,d,cell_limit=100):
# currently only year 0 is forced; other years are ignored
detvalues = 0
for i in range(self.cells)[:cell_limit]: |
if ground>0 and current>0:
self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),ground)
print 'force: moving(0 %d %d %d) from %f to %f'%(d,i,j,current,ground)
# try:
# self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),
# self.ground[(0,d,i,j)] )
# except:
# detvalues += 1
# print 'detvalues total = %d'%detvalues
def computeScoreDay(self, d):
bird_moves = self.ripl.sample('(get_birds_moving3 %d)' % d)
score = 0
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
score += (bird_moves[y][i][j] - self.ground[(y, d, i, j)]) ** 2
return score
def computeScore(self):
infer_bird_moves = self.getBirdMoves()
score = 0
for key in infer_bird_moves:
score += (infer_bird_moves[key] - self.ground[key]) ** 2
return score | for j in range(self.cells)[:cell_limit]:
ground = self.ground[(0,d,i,j)]
current = self.ripl.sample('(get_birds_moving 0 %d %d %d)'%(d,i,j)) | random_line_split |
model_param_old.py | import venture.shortcuts as s
from utils import *
from venture.unit import VentureUnit
from venture.ripl.ripl import _strip_types
num_features = 4
def loadFeatures(dataset, name, years, days,maxDay=None):
features_file = "data/input/dataset%d/%s-features.csv" % (dataset, name)
print "Loading features from %s" % features_file
features = readFeatures(features_file, maxYear=max(years)+1,maxDay=maxDay)
for (y, d, i, j) in features.keys():
if y not in years:# or d not in days:
del features[(y, d, i, j)]
return toVenture(features)
def loadObservations(ripl, dataset, name, years, days):
observations_file = "data/input/dataset%d/%s-observations.csv" % (dataset, name)
observations = readObservations(observations_file)
for y in years:
for (d, ns) in observations[y]:
if d not in days: continue
for i, n in enumerate(ns):
#print y, d, i
ripl.observe('(observe_birds %d %d %d)' % (y, d, i), n)
class OneBird(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.cells = params['cells']
self.years = params['years']
self.days = params['days']
self.features = loadFeatures(1, self.name, self.years, self.days)
super(OneBird, self).__init__(ripl, params)
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
# we want to infer the hyperparameters of a log-linear model
ripl.assume('scale', '(scope_include (quote hypers) (quote scale) (gamma 1 1))')
for k in range(num_features):
ripl.assume('hypers%d' % k, '(scope_include (quote hypers) %d (* scale (normal 0 10)))' % k)
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
# phi is the unnormalized probability of a bird moving
# from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(let ((fs (lookup features (array y d i j))))
(exp %s))))"""
% fold('+', '(* hypers_k_ (lookup fs _k_))', '_k_', num_features))
ripl.assume('get_bird_move_dist',
'(mem (lambda (y d i) ' +
fold('simplex', '(phi y d i j)', 'j', self.cells) +
'))')
ripl.assume('cell_array', fold('array', 'j', 'j', self.cells))
# samples where a bird would move to from cell i on day d
# the (year, day) pair is used as the block id within the move scope
ripl.assume('move', """
(lambda (y d i)
(let ((dist (get_bird_move_dist y d i)))
(scope_include (quote move) (array y d)
(categorical dist cell_array))))""")
ripl.assume('get_bird_pos', """
(mem (lambda (y d)
(if (= d 0) 0
(move y (- d 1) (get_bird_pos y (- d 1))))))""")
ripl.assume('count_birds', """
(lambda (y d i)
(if (= (get_bird_pos y d) i)
1 0))""")
ripl.assume('observe_birds', '(lambda (y d i) (poisson (+ (count_birds y d i) 0.0001)))')
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
observations_file = "data/input/dataset%d/%s-observations.csv" % (1, self.name)
observations = readObservations(observations_file)
self.unconstrained = []
for y in self.years:
for (d, ns) in observations[y]:
if d not in self.days: continue
if d == 0: continue
loc = None
for i, n in enumerate(ns):
if n > 0:
loc = i
break
if loc is None:
self.unconstrained.append((y, d-1))
#ripl.predict('(get_bird_pos %d %d)' % (y, d))
else:
ripl.observe('(get_bird_pos %d %d)' % (y, d), loc)
def inferMove(self, ripl = None):
if ripl is None:
ripl = self.ripl
for block in self.unconstrained:
ripl.infer({'kernel': 'gibbs', 'scope': 'move', 'block': block, 'transitions': 1})
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
class Poisson(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.width = params['width']
self.height = params['height']
self.cells = params['cells']
self.dataset = params['dataset']
self.total_birds = params['total_birds']
self.years = params['years']
self.days = params['days']
self.hypers = params["hypers"]
self.learnHypers = True if isinstance(self.hypers[0],str) else False
self.ground = readReconstruction(params)
if 'maxDay' in params:
self.maxDay = params["maxDay"]
self.features = loadFeatures(self.dataset, self.name, self.years, self.days,
maxDay=self.maxDay)
else:
self.features = loadFeatures(self.dataset, self.name, self.years, self.days)
val_features = self.features['value']
self.parsedFeatures = {k:_strip_types(v) for k,v in val_features.items() }
super(Poisson, self).__init__(ripl, params)
def feat_i(self, y, d, i, feat=2):
'Input *feat in range(3) (default=wind), return all values i,j for fixed i'
return [self.parsedFeatures[(y,d,i,j)][feat] for j in range(100)]
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
ripl.assume('total_birds', self.total_birds)
ripl.assume('cells', self.cells)
#ripl.assume('num_features', num_features)
# we want to infer the hyperparameters of a log-linear model
if not self.learnHypers:
|
else:
for k, prior in enumerate(self.hypers):
ripl.assume('hypers%d' % k,'(scope_include (quote hypers) 0 %s )'%prior)
#ripl.assume('hypers%d' % k,'(scope_include (quote hypers) %d %s )'%(k,prior) )
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
ripl.assume('width', self.width)
ripl.assume('height', self.height)
ripl.assume('max_dist2', '18')
ripl.assume('cell2X', '(lambda (cell) (int_div cell height))')
ripl.assume('cell2Y', '(lambda (cell) (int_mod cell height))')
#ripl.assume('cell2P', '(lambda (cell) (make_pair (cell2X cell) (cell2Y cell)))')
ripl.assume('XY2cell', '(lambda (x y) (+ (* height x) y))')
ripl.assume('square', '(lambda (x) (* x x))')
ripl.assume('dist2', """
(lambda (x1 y1 x2 y2)
(+ (square (- x1 x2)) (square (- y1 y2))))""")
ripl.assume('cell_dist2', """
(lambda (i j)
(dist2
(cell2X i) (cell2Y i)
(cell2X j) (cell2Y j)))""")
# phi is the unnormalized probability of a bird moving from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""
% fold('+', '(* hypers__k (lookup fs __k))', '__k', num_features))
ripl.assume('phi2', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""%'blah')
ripl.assume('get_bird_move_dist', """
(lambda (y d i)
(lambda (j)
(phi y d i j)))""")
ripl.assume('foldl', """
(lambda (op x min max f)
(if (= min max) x
(foldl op (op x (f min)) (+ min 1) max f)))""")
ripl.assume('multinomial_func', """
(lambda (n min max f)
(let ((normalize (foldl + 0 min max f)))
(mem (lambda (i)
(poisson (* n (/ (f i) normalize)))))))""")
ripl.assume('count_birds', """
(mem (lambda (y d i)
(if (= d 0) (if (= i 0) total_birds 0)""" +
fold('+', '(get_birds_moving y (- d 1) __j i)', '__j', self.cells) + ")))")
# bird_movements_loc
# if no birds at i, no movement to any j from i
# normalize is the normalizing constant for the move probabilities out of cell i
# n is the product of the bird count at cell i and the normalized probability of moving from i to j
# returns a lambda that takes j and returns a Poisson draw with mean n
## [NOTE: strings are bad, need library functions, need sq bracks for let]
ripl.assume('bird_movements_loc', """
(mem (lambda (y d i)
(if (= (count_birds y d i) 0)
(lambda (j) 0)
(let ((normalize (foldl + 0 0 cells (lambda (j) (phi y d i j)))))
(mem (lambda (j)
(if (= (phi y d i j) 0) 0
(let ((n (* (count_birds y d i) (/ (phi y d i j) normalize))))
(scope_include d (array y d i j)
(poisson n))))))))))""")
#ripl.assume('bird_movements', '(mem (lambda (y d) %s))' % fold('array', '(bird_movements_loc y d __i)', '__i', self.cells))
ripl.assume('observe_birds', '(mem (lambda (y d i) (poisson (+ (count_birds y d i) 0.0001))))')
# returns the number of birds moving from cell i to cell j (we want to force this value)
ripl.assume('get_birds_moving', """
(lambda (y d i j)
((bird_movements_loc y d i) j))""")
ripl.assume('get_birds_moving1', '(lambda (y d i) %s)' % fold('array', '(get_birds_moving y d i __j)', '__j', self.cells))
ripl.assume('get_birds_moving2', '(lambda (y d) %s)' % fold('array', '(get_birds_moving1 y d __i)', '__i', self.cells))
ripl.assume('get_birds_moving3', '(lambda (d) %s)' % fold('array', '(get_birds_moving2 __y d)', '__y', len(self.years)))
ripl.assume('get_birds_moving4', '(lambda () %s)' % fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1))
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading observations"
loadObservations(ripl, self.dataset, self.name, self.years, self.days)
def loadModel(self, ripl = None):
if ripl is None:
ripl = self.ripl
self.loadAssumes(ripl)
self.loadObserves(ripl)
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
def updateObserves(self, d):
self.days.append(d)
#if d > 0: self.ripl.forget('bird_moves')
loadObservations(self.ripl, self.dataset, self.name, self.years, [d])
self.ripl.infer('(incorporate)')
#self.ripl.predict(fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1), label='bird_moves')
def getBirdLocations(self, years=None, days=None):
if years is None: years = self.years
if days is None: days = self.days
bird_locations = {}
for y in years:
bird_locations[y] = {}
for d in days:
bird_locations[y][d] = [self.ripl.sample('(count_birds %d %d %d)' % (y, d, i)) for i in range(self.cells)]
return bird_locations
def drawBirdLocations(self):
bird_locs = self.getBirdLocations()
for y in self.years:
path = 'bird_moves%d/%d/' % (self.dataset, y)
ensure(path)
for d in self.days:
drawBirds(bird_locs[y][d], path + '%02d.png' % d, **self.parameters)
def getBirdMoves(self):
bird_moves = {}
for d in self.days[:-1]:
bird_moves_raw = self.ripl.sample('(get_birds_moving3 %d)' % d)
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
bird_moves[(y, d, i, j)] = bird_moves_raw[y][i][j]
return bird_moves
def forceBirdMoves(self,d,cell_limit=100):
# currently only year 0 is forced; other years are ignored
detvalues = 0
for i in range(self.cells)[:cell_limit]:
for j in range(self.cells)[:cell_limit]:
ground = self.ground[(0,d,i,j)]
current = self.ripl.sample('(get_birds_moving 0 %d %d %d)'%(d,i,j))
if ground>0 and current>0:
self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),ground)
print 'force: moving(0 %d %d %d) from %f to %f'%(d,i,j,current,ground)
# try:
# self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),
# self.ground[(0,d,i,j)] )
# except:
# detvalues += 1
# print 'detvalues total = %d'%detvalues
def computeScoreDay(self, d):
bird_moves = self.ripl.sample('(get_birds_moving3 %d)' % d)
score = 0
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
score += (bird_moves[y][i][j] - self.ground[(y, d, i, j)]) ** 2
return score
def computeScore(self):
infer_bird_moves = self.getBirdMoves()
score = 0
for key in infer_bird_moves:
score += (infer_bird_moves[key] - self.ground[key]) ** 2
return score
| for k, b in enumerate(self.hypers):
ripl.assume('hypers%d' % k, b) | conditional_block |
model_param_old.py | import venture.shortcuts as s
from utils import *
from venture.unit import VentureUnit
from venture.ripl.ripl import _strip_types
num_features = 4
def loadFeatures(dataset, name, years, days,maxDay=None):
features_file = "data/input/dataset%d/%s-features.csv" % (dataset, name)
print "Loading features from %s" % features_file
features = readFeatures(features_file, maxYear=max(years)+1,maxDay=maxDay)
for (y, d, i, j) in features.keys():
if y not in years:# or d not in days:
del features[(y, d, i, j)]
return toVenture(features)
def loadObservations(ripl, dataset, name, years, days):
observations_file = "data/input/dataset%d/%s-observations.csv" % (dataset, name)
observations = readObservations(observations_file)
for y in years:
for (d, ns) in observations[y]:
if d not in days: continue
for i, n in enumerate(ns):
#print y, d, i
ripl.observe('(observe_birds %d %d %d)' % (y, d, i), n)
class OneBird(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.cells = params['cells']
self.years = params['years']
self.days = params['days']
self.features = loadFeatures(1, self.name, self.years, self.days)
super(OneBird, self).__init__(ripl, params)
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
# we want to infer the hyperparameters of a log-linear model
ripl.assume('scale', '(scope_include (quote hypers) (quote scale) (gamma 1 1))')
for k in range(num_features):
ripl.assume('hypers%d' % k, '(scope_include (quote hypers) %d (* scale (normal 0 10)))' % k)
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
# phi is the unnormalized probability of a bird moving
# from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(let ((fs (lookup features (array y d i j))))
(exp %s))))"""
% fold('+', '(* hypers_k_ (lookup fs _k_))', '_k_', num_features))
ripl.assume('get_bird_move_dist',
'(mem (lambda (y d i) ' +
fold('simplex', '(phi y d i j)', 'j', self.cells) +
'))')
ripl.assume('cell_array', fold('array', 'j', 'j', self.cells))
# samples where a bird would move to from cell i on day d
# the (year, day) pair is used as the block id within the move scope
ripl.assume('move', """
(lambda (y d i)
(let ((dist (get_bird_move_dist y d i)))
(scope_include (quote move) (array y d)
(categorical dist cell_array))))""")
ripl.assume('get_bird_pos', """
(mem (lambda (y d)
(if (= d 0) 0
(move y (- d 1) (get_bird_pos y (- d 1))))))""")
ripl.assume('count_birds', """
(lambda (y d i)
(if (= (get_bird_pos y d) i)
1 0))""")
ripl.assume('observe_birds', '(lambda (y d i) (poisson (+ (count_birds y d i) 0.0001)))')
def | (self, ripl = None):
if ripl is None:
ripl = self.ripl
observations_file = "data/input/dataset%d/%s-observations.csv" % (1, self.name)
observations = readObservations(observations_file)
self.unconstrained = []
for y in self.years:
for (d, ns) in observations[y]:
if d not in self.days: continue
if d == 0: continue
loc = None
for i, n in enumerate(ns):
if n > 0:
loc = i
break
if loc is None:
self.unconstrained.append((y, d-1))
#ripl.predict('(get_bird_pos %d %d)' % (y, d))
else:
ripl.observe('(get_bird_pos %d %d)' % (y, d), loc)
def inferMove(self, ripl = None):
if ripl is None:
ripl = self.ripl
for block in self.unconstrained:
ripl.infer({'kernel': 'gibbs', 'scope': 'move', 'block': block, 'transitions': 1})
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
class Poisson(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.width = params['width']
self.height = params['height']
self.cells = params['cells']
self.dataset = params['dataset']
self.total_birds = params['total_birds']
self.years = params['years']
self.days = params['days']
self.hypers = params["hypers"]
self.learnHypers = True if isinstance(self.hypers[0],str) else False
self.ground = readReconstruction(params)
if 'maxDay' in params:
self.maxDay = params["maxDay"]
self.features = loadFeatures(self.dataset, self.name, self.years, self.days,
maxDay=self.maxDay)
else:
self.features = loadFeatures(self.dataset, self.name, self.years, self.days)
val_features = self.features['value']
self.parsedFeatures = {k:_strip_types(v) for k,v in val_features.items() }
super(Poisson, self).__init__(ripl, params)
def feat_i(self, y, d, i, feat=2):
'Input *feat in range(3) (default=wind), return all values i,j for fixed i'
return [self.parsedFeatures[(y,d,i,j)][feat] for j in range(100)]
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
ripl.assume('total_birds', self.total_birds)
ripl.assume('cells', self.cells)
#ripl.assume('num_features', num_features)
# we want to infer the hyperparameters of a log-linear model
if not self.learnHypers:
for k, b in enumerate(self.hypers):
ripl.assume('hypers%d' % k, b)
else:
for k, prior in enumerate(self.hypers):
ripl.assume('hypers%d' % k,'(scope_include (quote hypers) 0 %s )'%prior)
#ripl.assume('hypers%d' % k,'(scope_include (quote hypers) %d %s )'%(k,prior) )
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
ripl.assume('width', self.width)
ripl.assume('height', self.height)
ripl.assume('max_dist2', '18')
ripl.assume('cell2X', '(lambda (cell) (int_div cell height))')
ripl.assume('cell2Y', '(lambda (cell) (int_mod cell height))')
#ripl.assume('cell2P', '(lambda (cell) (make_pair (cell2X cell) (cell2Y cell)))')
ripl.assume('XY2cell', '(lambda (x y) (+ (* height x) y))')
ripl.assume('square', '(lambda (x) (* x x))')
ripl.assume('dist2', """
(lambda (x1 y1 x2 y2)
(+ (square (- x1 x2)) (square (- y1 y2))))""")
ripl.assume('cell_dist2', """
(lambda (i j)
(dist2
(cell2X i) (cell2Y i)
(cell2X j) (cell2Y j)))""")
# phi is the unnormalized probability of a bird moving from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""
% fold('+', '(* hypers__k (lookup fs __k))', '__k', num_features))
ripl.assume('phi2', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""%'blah')
ripl.assume('get_bird_move_dist', """
(lambda (y d i)
(lambda (j)
(phi y d i j)))""")
ripl.assume('foldl', """
(lambda (op x min max f)
(if (= min max) x
(foldl op (op x (f min)) (+ min 1) max f)))""")
ripl.assume('multinomial_func', """
(lambda (n min max f)
(let ((normalize (foldl + 0 min max f)))
(mem (lambda (i)
(poisson (* n (/ (f i) normalize)))))))""")
ripl.assume('count_birds', """
(mem (lambda (y d i)
(if (= d 0) (if (= i 0) total_birds 0)""" +
fold('+', '(get_birds_moving y (- d 1) __j i)', '__j', self.cells) + ")))")
# bird_movements_loc
# if no birds at i, no movement to any j from i
# normalize is the normalizing constant for the move probabilities out of cell i
# n is the product of the bird count at cell i and the normalized probability of moving from i to j
# returns a lambda that takes j and returns a Poisson draw with mean n
## [NOTE: strings are bad, need library functions, need sq bracks for let]
ripl.assume('bird_movements_loc', """
(mem (lambda (y d i)
(if (= (count_birds y d i) 0)
(lambda (j) 0)
(let ((normalize (foldl + 0 0 cells (lambda (j) (phi y d i j)))))
(mem (lambda (j)
(if (= (phi y d i j) 0) 0
(let ((n (* (count_birds y d i) (/ (phi y d i j) normalize))))
(scope_include d (array y d i j)
(poisson n))))))))))""")
#ripl.assume('bird_movements', '(mem (lambda (y d) %s))' % fold('array', '(bird_movements_loc y d __i)', '__i', self.cells))
ripl.assume('observe_birds', '(mem (lambda (y d i) (poisson (+ (count_birds y d i) 0.0001))))')
# returns the number of birds moving from cell i to cell j (we want to force this value)
ripl.assume('get_birds_moving', """
(lambda (y d i j)
((bird_movements_loc y d i) j))""")
ripl.assume('get_birds_moving1', '(lambda (y d i) %s)' % fold('array', '(get_birds_moving y d i __j)', '__j', self.cells))
ripl.assume('get_birds_moving2', '(lambda (y d) %s)' % fold('array', '(get_birds_moving1 y d __i)', '__i', self.cells))
ripl.assume('get_birds_moving3', '(lambda (d) %s)' % fold('array', '(get_birds_moving2 __y d)', '__y', len(self.years)))
ripl.assume('get_birds_moving4', '(lambda () %s)' % fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1))
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading observations"
loadObservations(ripl, self.dataset, self.name, self.years, self.days)
def loadModel(self, ripl = None):
if ripl is None:
ripl = self.ripl
self.loadAssumes(ripl)
self.loadObserves(ripl)
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
def updateObserves(self, d):
self.days.append(d)
#if d > 0: self.ripl.forget('bird_moves')
loadObservations(self.ripl, self.dataset, self.name, self.years, [d])
self.ripl.infer('(incorporate)')
#self.ripl.predict(fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1), label='bird_moves')
def getBirdLocations(self, years=None, days=None):
if years is None: years = self.years
if days is None: days = self.days
bird_locations = {}
for y in years:
bird_locations[y] = {}
for d in days:
bird_locations[y][d] = [self.ripl.sample('(count_birds %d %d %d)' % (y, d, i)) for i in range(self.cells)]
return bird_locations
def drawBirdLocations(self):
bird_locs = self.getBirdLocations()
for y in self.years:
path = 'bird_moves%d/%d/' % (self.dataset, y)
ensure(path)
for d in self.days:
drawBirds(bird_locs[y][d], path + '%02d.png' % d, **self.parameters)
def getBirdMoves(self):
bird_moves = {}
for d in self.days[:-1]:
bird_moves_raw = self.ripl.sample('(get_birds_moving3 %d)' % d)
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
bird_moves[(y, d, i, j)] = bird_moves_raw[y][i][j]
return bird_moves
def forceBirdMoves(self,d,cell_limit=100):
# currently only year 0 is forced; other years are ignored
detvalues = 0
for i in range(self.cells)[:cell_limit]:
for j in range(self.cells)[:cell_limit]:
ground = self.ground[(0,d,i,j)]
current = self.ripl.sample('(get_birds_moving 0 %d %d %d)'%(d,i,j))
if ground>0 and current>0:
self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),ground)
print 'force: moving(0 %d %d %d) from %f to %f'%(d,i,j,current,ground)
# try:
# self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),
# self.ground[(0,d,i,j)] )
# except:
# detvalues += 1
# print 'detvalues total = %d'%detvalues
def computeScoreDay(self, d):
bird_moves = self.ripl.sample('(get_birds_moving3 %d)' % d)
score = 0
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
score += (bird_moves[y][i][j] - self.ground[(y, d, i, j)]) ** 2
return score
def computeScore(self):
infer_bird_moves = self.getBirdMoves()
score = 0
for key in infer_bird_moves:
score += (infer_bird_moves[key] - self.ground[key]) ** 2
return score
| loadObserves | identifier_name |
model_param_old.py | import venture.shortcuts as s
from utils import *
from venture.unit import VentureUnit
from venture.ripl.ripl import _strip_types
num_features = 4
def loadFeatures(dataset, name, years, days,maxDay=None):
features_file = "data/input/dataset%d/%s-features.csv" % (dataset, name)
print "Loading features from %s" % features_file
features = readFeatures(features_file, maxYear=max(years)+1,maxDay=maxDay)
for (y, d, i, j) in features.keys():
if y not in years:# or d not in days:
del features[(y, d, i, j)]
return toVenture(features)
def loadObservations(ripl, dataset, name, years, days):
observations_file = "data/input/dataset%d/%s-observations.csv" % (dataset, name)
observations = readObservations(observations_file)
for y in years:
for (d, ns) in observations[y]:
if d not in days: continue
for i, n in enumerate(ns):
#print y, d, i
ripl.observe('(observe_birds %d %d %d)' % (y, d, i), n)
class OneBird(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.cells = params['cells']
self.years = params['years']
self.days = params['days']
self.features = loadFeatures(1, self.name, self.years, self.days)
super(OneBird, self).__init__(ripl, params)
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
# we want to infer the hyperparameters of a log-linear model
ripl.assume('scale', '(scope_include (quote hypers) (quote scale) (gamma 1 1))')
for k in range(num_features):
ripl.assume('hypers%d' % k, '(scope_include (quote hypers) %d (* scale (normal 0 10)))' % k)
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
# phi is the unnormalized probability of a bird moving
# from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(let ((fs (lookup features (array y d i j))))
(exp %s))))"""
% fold('+', '(* hypers_k_ (lookup fs _k_))', '_k_', num_features))
ripl.assume('get_bird_move_dist',
'(mem (lambda (y d i) ' +
fold('simplex', '(phi y d i j)', 'j', self.cells) +
'))')
ripl.assume('cell_array', fold('array', 'j', 'j', self.cells))
# samples where a bird would move to from cell i on day d
# the (year, day) pair is used as the block id within the move scope
ripl.assume('move', """
(lambda (y d i)
(let ((dist (get_bird_move_dist y d i)))
(scope_include (quote move) (array y d)
(categorical dist cell_array))))""")
ripl.assume('get_bird_pos', """
(mem (lambda (y d)
(if (= d 0) 0
(move y (- d 1) (get_bird_pos y (- d 1))))))""")
ripl.assume('count_birds', """
(lambda (y d i)
(if (= (get_bird_pos y d) i)
1 0))""")
ripl.assume('observe_birds', '(lambda (y d i) (poisson (+ (count_birds y d i) 0.0001)))')
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
observations_file = "data/input/dataset%d/%s-observations.csv" % (1, self.name)
observations = readObservations(observations_file)
self.unconstrained = []
for y in self.years:
for (d, ns) in observations[y]:
if d not in self.days: continue
if d == 0: continue
loc = None
for i, n in enumerate(ns):
if n > 0:
loc = i
break
if loc is None:
self.unconstrained.append((y, d-1))
#ripl.predict('(get_bird_pos %d %d)' % (y, d))
else:
ripl.observe('(get_bird_pos %d %d)' % (y, d), loc)
def inferMove(self, ripl = None):
if ripl is None:
ripl = self.ripl
for block in self.unconstrained:
ripl.infer({'kernel': 'gibbs', 'scope': 'move', 'block': block, 'transitions': 1})
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
class Poisson(VentureUnit):
def __init__(self, ripl, params):
self.name = params['name']
self.width = params['width']
self.height = params['height']
self.cells = params['cells']
self.dataset = params['dataset']
self.total_birds = params['total_birds']
self.years = params['years']
self.days = params['days']
self.hypers = params["hypers"]
self.learnHypers = True if isinstance(self.hypers[0],str) else False
self.ground = readReconstruction(params)
if 'maxDay' in params:
self.maxDay = params["maxDay"]
self.features = loadFeatures(self.dataset, self.name, self.years, self.days,
maxDay=self.maxDay)
else:
self.features = loadFeatures(self.dataset, self.name, self.years, self.days)
val_features = self.features['value']
self.parsedFeatures = {k:_strip_types(v) for k,v in val_features.items() }
super(Poisson, self).__init__(ripl, params)
def feat_i(self, y, d, i, feat=2):
'Input *feat in range(3) (default=wind), return all values i,j for fixed i'
return [self.parsedFeatures[(y,d,i,j)][feat] for j in range(100)]
def loadAssumes(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading assumes"
ripl.assume('total_birds', self.total_birds)
ripl.assume('cells', self.cells)
#ripl.assume('num_features', num_features)
# we want to infer the hyperparameters of a log-linear model
if not self.learnHypers:
for k, b in enumerate(self.hypers):
ripl.assume('hypers%d' % k, b)
else:
for k, prior in enumerate(self.hypers):
ripl.assume('hypers%d' % k,'(scope_include (quote hypers) 0 %s )'%prior)
#ripl.assume('hypers%d' % k,'(scope_include (quote hypers) %d %s )'%(k,prior) )
# the features will all be observed
#ripl.assume('features', '(mem (lambda (y d i j k) (normal 0 1)))')
ripl.assume('features', self.features)
ripl.assume('width', self.width)
ripl.assume('height', self.height)
ripl.assume('max_dist2', '18')
ripl.assume('cell2X', '(lambda (cell) (int_div cell height))')
ripl.assume('cell2Y', '(lambda (cell) (int_mod cell height))')
#ripl.assume('cell2P', '(lambda (cell) (make_pair (cell2X cell) (cell2Y cell)))')
ripl.assume('XY2cell', '(lambda (x y) (+ (* height x) y))')
ripl.assume('square', '(lambda (x) (* x x))')
ripl.assume('dist2', """
(lambda (x1 y1 x2 y2)
(+ (square (- x1 x2)) (square (- y1 y2))))""")
ripl.assume('cell_dist2', """
(lambda (i j)
(dist2
(cell2X i) (cell2Y i)
(cell2X j) (cell2Y j)))""")
# phi is the unnormalized probability of a bird moving from cell i to cell j on day d
ripl.assume('phi', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""
% fold('+', '(* hypers__k (lookup fs __k))', '__k', num_features))
ripl.assume('phi2', """
(mem (lambda (y d i j)
(if (> (cell_dist2 i j) max_dist2) 0
(let ((fs (lookup features (array y d i j))))
(exp %s)))))"""%'blah')
ripl.assume('get_bird_move_dist', """
(lambda (y d i)
(lambda (j)
(phi y d i j)))""")
ripl.assume('foldl', """
(lambda (op x min max f)
(if (= min max) x
(foldl op (op x (f min)) (+ min 1) max f)))""")
ripl.assume('multinomial_func', """
(lambda (n min max f)
(let ((normalize (foldl + 0 min max f)))
(mem (lambda (i)
(poisson (* n (/ (f i) normalize)))))))""")
ripl.assume('count_birds', """
(mem (lambda (y d i)
(if (= d 0) (if (= i 0) total_birds 0)""" +
fold('+', '(get_birds_moving y (- d 1) __j i)', '__j', self.cells) + ")))")
# bird_movements_loc
# if no birds at i, no movement to any j from i
# normalize is the normalizing constant for the move probabilities out of cell i
# n is the product of the bird count at cell i and the normalized probability of moving from i to j
# returns a lambda that takes j and returns a Poisson draw with mean n
## [NOTE: strings are bad, need library functions, need sq bracks for let]
ripl.assume('bird_movements_loc', """
(mem (lambda (y d i)
(if (= (count_birds y d i) 0)
(lambda (j) 0)
(let ((normalize (foldl + 0 0 cells (lambda (j) (phi y d i j)))))
(mem (lambda (j)
(if (= (phi y d i j) 0) 0
(let ((n (* (count_birds y d i) (/ (phi y d i j) normalize))))
(scope_include d (array y d i j)
(poisson n))))))))))""")
#ripl.assume('bird_movements', '(mem (lambda (y d) %s))' % fold('array', '(bird_movements_loc y d __i)', '__i', self.cells))
ripl.assume('observe_birds', '(mem (lambda (y d i) (poisson (+ (count_birds y d i) 0.0001))))')
# returns the number of birds moving from cell i to cell j (we want to force this value)
ripl.assume('get_birds_moving', """
(lambda (y d i j)
((bird_movements_loc y d i) j))""")
ripl.assume('get_birds_moving1', '(lambda (y d i) %s)' % fold('array', '(get_birds_moving y d i __j)', '__j', self.cells))
ripl.assume('get_birds_moving2', '(lambda (y d) %s)' % fold('array', '(get_birds_moving1 y d __i)', '__i', self.cells))
ripl.assume('get_birds_moving3', '(lambda (d) %s)' % fold('array', '(get_birds_moving2 __y d)', '__y', len(self.years)))
ripl.assume('get_birds_moving4', '(lambda () %s)' % fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1))
def loadObserves(self, ripl = None):
if ripl is None:
ripl = self.ripl
print "Loading observations"
loadObservations(ripl, self.dataset, self.name, self.years, self.days)
def loadModel(self, ripl = None):
if ripl is None:
ripl = self.ripl
self.loadAssumes(ripl)
self.loadObserves(ripl)
def makeAssumes(self):
self.loadAssumes(ripl=self)
def makeObserves(self):
self.loadObserves(ripl=self)
def updateObserves(self, d):
self.days.append(d)
#if d > 0: self.ripl.forget('bird_moves')
loadObservations(self.ripl, self.dataset, self.name, self.years, [d])
self.ripl.infer('(incorporate)')
#self.ripl.predict(fold('array', '(get_birds_moving3 __d)', '__d', len(self.days)-1), label='bird_moves')
def getBirdLocations(self, years=None, days=None):
if years is None: years = self.years
if days is None: days = self.days
bird_locations = {}
for y in years:
bird_locations[y] = {}
for d in days:
bird_locations[y][d] = [self.ripl.sample('(count_birds %d %d %d)' % (y, d, i)) for i in range(self.cells)]
return bird_locations
def drawBirdLocations(self):
bird_locs = self.getBirdLocations()
for y in self.years:
path = 'bird_moves%d/%d/' % (self.dataset, y)
ensure(path)
for d in self.days:
drawBirds(bird_locs[y][d], path + '%02d.png' % d, **self.parameters)
def getBirdMoves(self):
bird_moves = {}
for d in self.days[:-1]:
bird_moves_raw = self.ripl.sample('(get_birds_moving3 %d)' % d)
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
bird_moves[(y, d, i, j)] = bird_moves_raw[y][i][j]
return bird_moves
def forceBirdMoves(self,d,cell_limit=100):
# currently only year 0 is forced; other years are ignored
detvalues = 0
for i in range(self.cells)[:cell_limit]:
for j in range(self.cells)[:cell_limit]:
ground = self.ground[(0,d,i,j)]
current = self.ripl.sample('(get_birds_moving 0 %d %d %d)'%(d,i,j))
if ground>0 and current>0:
self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),ground)
print 'force: moving(0 %d %d %d) from %f to %f'%(d,i,j,current,ground)
# try:
# self.ripl.force('(get_birds_moving 0 %d %d %d)'%(d,i,j),
# self.ground[(0,d,i,j)] )
# except:
# detvalues += 1
# print 'detvalues total = %d'%detvalues
def computeScoreDay(self, d):
bird_moves = self.ripl.sample('(get_birds_moving3 %d)' % d)
score = 0
for y in self.years:
for i in range(self.cells):
for j in range(self.cells):
score += (bird_moves[y][i][j] - self.ground[(y, d, i, j)]) ** 2
return score
def computeScore(self):
| infer_bird_moves = self.getBirdMoves()
score = 0
for key in infer_bird_moves:
score += (infer_bird_moves[key] - self.ground[key]) ** 2
return score | identifier_body |
|
score_codons.py | #!/usr/bin/python
'''
This script generates a codon-optimised coding sequence for a protein, based on
a FASTA protein sequence and a table of relative codon usage.
'''
from sets import Set
import sys,argparse
from collections import defaultdict
import re
import numpy as np
import csv
import random
from Bio import SeqIO
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
#These commands use the argparse module to import files specified in the command line
ap = argparse.ArgumentParser()
ap.add_argument('--fasta_aa',required=True,type=str,help='protein sequence for conversion')
ap.add_argument('--fasta_cds',required=True,type=str,help='cds for conversion')
ap.add_argument('--codon_table',required=True,type=str,help='text file containing codon usage table')
ap.add_argument('--prefix',required=True,type=str,help='output directory/filename prefix for output files')
conf = ap.parse_args()
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
class AA_weight_obj(object):
"""
"""
def __init__(self, aa):
""" """
self.aa = aa
self.weightings = defaultdict(float)
self.weightings_adj = defaultdict(float)
self.max = float()
self.optimal = str()
self.codons = []
self.sorted_adj_weightings = []
self.sorted_codons = []
self.weight_list = []
self.weight_list_adj = []
def add_weight(self, codon, weight):
""" """
# print codon
# print weight
self.weightings[codon] = float(weight)
# if float(weight) > self.max:
# self.max = float(weight)
# self.optimal = codon
self.codons.append(codon)
self.weight_list.append(weight)
def | (self):
""" """
num_codons = len(self.codons)
r = float(random.randrange(0,10000, 1))
# r = float(random.randrange(0,num_codons*100, 1))
# print (self.aa)
# print(r)
r = np.divide(r, 10000)
# r = np.divide(r, 100)
# print(" of max ".join([str(r), str(num_codons)]))
for x, y in zip(self.sorted_codons, self.sorted_adj_weightings):
# print(" - ".join([str(r), str(x), str(y)]))
selected_codon = x
if float(y) >= float(r):
break
else:
r = r - float(y)
return selected_codon
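# (Added example, hypothetical numbers) with sorted_adj_weightings [0.1, 0.3, 0.6]
# and r = 0.35, the loop first subtracts 0.1 (r -> 0.25), then stops because
# 0.3 >= 0.25 and returns the second codon in sorted order, so codons are chosen
# with probability proportional to their adjusted weights.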
def get_opt(self):
""" """
# sorted_weightings = sorted(self.weight_list)
# sorted_codons = [x for _,x in sorted(zip(self.weight_list,self.codons))]
# print sorted_weightings
# print sorted_codons
# return sorted_codons[-1]
return self.sorted_codons[-1]
def adjust_weight(self):
""" """
num_codons = len(self.weight_list)
# print num_codons
# print(self.weight_list)
self.weight_list_adj = [round(np.divide(float(x), num_codons),5) for x in self.weight_list]
# print self.weight_list_adj
self.sorted_adj_weightings = sorted(self.weight_list_adj)
self.sorted_codons = [x for _,x in sorted(zip(self.weight_list_adj,self.codons))]
for x,y in zip(self.sorted_codons, self.sorted_adj_weightings):
self.weightings_adj[x] = y
self.max = self.sorted_adj_weightings[-1]
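# (Added note) adjust_weight divides each raw weight by the number of codons for the
# amino acid; assuming the table holds relative-usage values averaging 1 per codon
# (e.g. leucine with six codons and raw weights summing to ~6), the adjusted weights
# then sum to roughly 1, which is what random_codon() relies on above.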
class CodonTab_obj(object):
"""
"""
def __init__(self):
"""Return a Expression_obj whose name is *gene_id*"""
# self.organism = []
self.weighting_dict = defaultdict(list)
# self.codon_obj_dict = {}
self.codon_dict = {
'UUU':'F','UUC':'F',
'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',
'AUU':'I','AUC':'I','AUA':'I',
'AUG':'M',
'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',
'UCU':'S','UCC':'S','UCA':'S','UCG':'S',
'CCU':'P','CCC':'P','CCA':'P','CCG':'P',
'ACU':'T','ACC':'T','ACA':'T','ACG':'T',
'GCU':'A','GCC':'A','GCA':'A','GCG':'A',
'UAU':'Y','UAC':'Y',
'UAA':'X','UAG':'X',
'CAU':'H','CAC':'H',
'CAA':'Q','CAG':'Q',
'AAU':'N','AAC':'N',
'AAA':'K','AAG':'K',
'GAU':'D','GAC':'D',
'GAA':'E','GAG':'E',
'UGU':'C','UGC':'C',
'UGA':'X',
'UGG':'W',
'CGU':'R','CGC':'R','CGA':'R','CGG':'R',
'AGU':'S','AGC':'S',
'AGA':'R','AGG':'R',
'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'
}
def add_table(self, table):
""""""
table = table.replace(' ', '')
table_lines = table.split(';')
for line in table_lines:
split_line = line.split(':')
codon = split_line[0]
# print codon
weighting = split_line[1]
# print weighting
aa = self.codon_dict[codon]
if self.weighting_dict[aa] and self.weighting_dict[aa][0]:
obj = self.weighting_dict[aa][0]
# print obj.weightings
else:
obj = AA_weight_obj(aa)
obj.add_weight(codon, weighting)
self.weighting_dict[aa].append(obj)
for aa in self.weighting_dict.keys():
self.weighting_dict[aa][0].adjust_weight()
def optimise_rand(prot):
new_seq = ''
for aa in prot:
new_aa = vd_table_obj.weighting_dict[aa][0].random_codon()
new_seq = new_seq + new_aa
return(new_seq)
def optimise_best(prot):
new_seq = ''
for aa in prot:
# print aa
# new_aa = vd_table_obj.weighting_dict[aa][0].get_opt()
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[-1]
new_seq = new_seq + new_aa
return(new_seq)
def optimise_worst(prot):
new_seq = ''
for aa in prot:
# print aa
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[0]
new_seq = new_seq + new_aa
return(new_seq)
def score_seq(seq, table_obj):
codons = [seq[i:i+3] for i in range(0, len(seq), 3)]
total_score = float(0)
total_max = float(0)
for codon in codons:
aa = table_obj.codon_dict[codon]
score = table_obj.weighting_dict[aa][0].weightings_adj[codon]
# score = score - table_obj.weighting_dict[aa][0].weight_list_adj[0]
max = table_obj.weighting_dict[aa][0].max
total_score = total_score + score
total_max = total_max + max
return [round(np.divide(total_score, total_max), 2), round(np.divide(total_max, total_max), 2)]
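# (Added worked example) using the alanine weights from the example table commented
# further below (GCC 1.98, GCA 0.44, best 1.98, four codons), the RNA sequence
# 'GCCGCA' scores (0.495 + 0.110) / (0.495 + 0.495) = 0.61 of a possible 1.0.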
# scores = []
# for aa in seq.split(''):
# scores.append(score_dict[aa])
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
seq_records = list(SeqIO.parse(conf.fasta_aa, "fasta"))
cds_records = list(SeqIO.parse(conf.fasta_cds, "fasta"))
prefix = conf.prefix
with open(conf.codon_table) as f:
table_lines = []
for line in f.readlines():
table_lines.append(line.rstrip())
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
record = seq_records[0]
# print record
prot = record.seq
# prot = 'MVSKGEEDNMAIIKEFMRFKVHMEGSVNGHEFEIEGEGEGRPYEGTQTAKLKVTKGGPLPFAWDILSPQFMYGSKAYVKHPADIPDYLKLSFPEGFKWERVMNFEDGGVVTVTQDSSLQDGEFIYKVKLRGTNFPSDGPVMQKKTMGWEASSERMYPEDGALKGEIKQRLKLKDGGHYDAEVKTTYKAKKPVQLPGAYNVNIKLDITSHNEDYTIVEQYERAEGRHSTGGMDELYK'
table = "".join(table_lines)
# table = 'UUU: 0.55; UCU: 0.85; UAU: 0.40; UGU: 0.44; UUC: 1.45; UCC: 1.41; UAC: 1.60; UGC: 1.56; UUA: 0.07; UCA: 0.51; UAA: 1.04; UGA: 1.06; UUG: 0.55; UCG: 1.36; UAG: 0.90; UGG: 1.00; CUU: 0.84; CCU: 0.93; CAU: 0.50; CGU: 0.97; CUC: 2.49; CCC: 1.66; CAC: 1.50; CGC: 2.45; CUA: 0.23; CCA: 0.53; CAA: 0.50; CGA: 0.75; CUG: 1.81; CCG: 0.89; CAG: 1.50; CGG: 0.71; AUU: 0.95; ACU: 0.58; AAU: 0.37; AGU: 0.39; AUC: 1.91; ACC: 1.62; AAC: 1.63; AGC: 1.49; AUA: 0.14; ACA: 0.58; AAA: 0.26; AGA: 0.36; AUG: 1.00; ACG: 1.22; AAG: 1.74; AGG: 0.76; GUU: 0.73; GCU: 0.80; GAU: 0.61; GGU: 0.91; GUC: 2.20; GCC: 1.98; GAC: 1.39; GGC: 2.32; GUA: 0.18; GCA: 0.44; GAA: 0.48; GGA: 0.46; GUG: 0.88; GCG: 0.77; GAG: 1.52; GGG: 0.31'
vd_table_obj = CodonTab_obj()
vd_table_obj.add_table(table)
# for k in vd_table_obj.weighting_dict.keys():
# print(vd_table_obj.weighting_dict[k][0].weightings)
# print(prot)
#-----------------------------------------------------
# Step X
# Optimise codons - random weightings
#-----------------------------------------------------
print("randomised codons:")
new_cds = optimise_rand(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - optimum codons
#-----------------------------------------------------
print("optimum sequence:")
new_cds = optimise_best(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - worst codons
#-----------------------------------------------------
print("worst sequence:")
new_cds = optimise_worst(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Score 1000 sequences for optimisation scores
#-----------------------------------------------------
score_list = []
cds_list = []
f = open("_".join([prefix, "1000_seqs.fa"]), "w+")
for i in range(0, 10000, 1):
new_cds = optimise_rand(prot)
seq_score, max = score_seq(new_cds, vd_table_obj)
# print seq_score
cds_list.append(new_cds)
score_list.append(str(round(seq_score, 2)))
f.write(">cds_" + str(i) + "_" + str(seq_score) + "\n")
f.write(new_cds + "\n")
f.close()
f = open("_".join([prefix, "1000_scores.tsv"]), "w+")
f.write("\n".join(score_list))
f.close()
midpoint_score = sorted(score_list)[500]
sorted_cds = [x for _,x in sorted(zip(score_list,cds_list))]
midpoint_cds = sorted_cds[500]
print("midpoint sequence:")
print midpoint_score
print midpoint_cds
#-----------------------------------------------------
# Step X
# Score the pre-optimised sequence
#-----------------------------------------------------
print("Score of the pre-optimised sequence:")
for record in cds_records:
print record.id
old_cds = str(record.seq)
old_cds = old_cds.replace('T', 'U')
# print old_cds
seq_score, max = score_seq(old_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
# print(score_list)
# #set matplotlib to use a backend suitable for headless operation
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#
# plt.hist(score_list, bins='auto')
# out='tmp.png'
# plt.savefig(out, dpi=300, bbox_inches='tight')
# rng = np.random.RandomState(10) # deterministic random data
# a = np.hstack((rng.normal(size=1000),
# rng.normal(loc=5, scale=2, size=1000)))
| random_codon | identifier_name |
score_codons.py | #!/usr/bin/python
'''
This script generates a codon-optimised coding sequence for a protein, based on
a FASTA protein sequence and a table of relative codon usage.
'''
from sets import Set
import sys,argparse
from collections import defaultdict
import re
import numpy as np
import csv
import random
from Bio import SeqIO
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
#These commands use the argparse module to import files specified in the command line
ap = argparse.ArgumentParser()
ap.add_argument('--fasta_aa',required=True,type=str,help='protein sequence for conversion')
ap.add_argument('--fasta_cds',required=True,type=str,help='cds for conversion')
ap.add_argument('--codon_table',required=True,type=str,help='text file containing codon usage table')
ap.add_argument('--prefix',required=True,type=str,help='output directory/filename prefix for output files')
conf = ap.parse_args()
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
class AA_weight_obj(object):
"""
"""
def __init__(self, aa):
""" """
self.aa = aa
self.weightings = defaultdict(float)
self.weightings_adj = defaultdict(float)
self.max = float()
self.optimal = str()
self.codons = []
self.sorted_adj_weightings = []
self.sorted_codons = []
self.weight_list = []
self.weight_list_adj = []
def add_weight(self, codon, weight):
""" """
# print codon
# print weight
self.weightings[codon] = float(weight)
# if float(weight) > self.max:
# self.max = float(weight)
# self.optimal = codon
self.codons.append(codon)
self.weight_list.append(weight)
def random_codon(self):
""" """
num_codons = len(self.codons)
r = float(random.randrange(0,10000, 1))
# r = float(random.randrange(0,num_codons*100, 1))
# print (self.aa)
# print(r)
r = np.divide(r, 10000)
# r = np.divide(r, 100)
# print(" of max ".join([str(r), str(num_codons)]))
for x, y in zip(self.sorted_codons, self.sorted_adj_weightings):
# print(" - ".join([str(r), str(x), str(y)]))
selected_codon = x
if float(y) >= float(r):
break
else:
r = r - float(y)
return selected_codon
def get_opt(self):
""" """
# sorted_weightings = sorted(self.weight_list)
# sorted_codons = [x for _,x in sorted(zip(self.weight_list,self.codons))]
# print sorted_weightings
# print sorted_codons
# return sorted_codons[-1]
return self.sorted_codons[-1]
def adjust_weight(self):
""" """
num_codons = len(self.weight_list)
# print num_codons
# print(self.weight_list)
self.weight_list_adj = [round(np.divide(float(x), num_codons),5) for x in self.weight_list]
# print self.weight_list_adj
self.sorted_adj_weightings = sorted(self.weight_list_adj)
self.sorted_codons = [x for _,x in sorted(zip(self.weight_list_adj,self.codons))]
for x,y in zip(self.sorted_codons, self.sorted_adj_weightings):
self.weightings_adj[x] = y
self.max = self.sorted_adj_weightings[-1]
class CodonTab_obj(object):
|
def optimise_rand(prot):
new_seq = ''
for aa in prot:
new_aa = vd_table_obj.weighting_dict[aa][0].random_codon()
new_seq = new_seq + new_aa
return(new_seq)
def optimise_best(prot):
new_seq = ''
for aa in prot:
# print aa
# new_aa = vd_table_obj.weighting_dict[aa][0].get_opt()
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[-1]
new_seq = new_seq + new_aa
return(new_seq)
def optimise_worst(prot):
new_seq = ''
for aa in prot:
# print aa
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[0]
new_seq = new_seq + new_aa
return(new_seq)
def score_seq(seq, table_obj):
codons = [seq[i:i+3] for i in range(0, len(seq), 3)]
total_score = float(0)
total_max = float(0)
for codon in codons:
aa = table_obj.codon_dict[codon]
score = table_obj.weighting_dict[aa][0].weightings_adj[codon]
# score = score - table_obj.weighting_dict[aa][0].weight_list_adj[0]
max = table_obj.weighting_dict[aa][0].max
total_score = total_score + score
total_max = total_max + max
return [round(np.divide(total_score, total_max), 2), round(np.divide(total_max, total_max), 2)]
# scores = []
# for aa in seq.split(''):
# scores.append(score_dict[aa])
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
seq_records = list(SeqIO.parse(conf.fasta_aa, "fasta"))
cds_records = list(SeqIO.parse(conf.fasta_cds, "fasta"))
prefix = conf.prefix
with open(conf.codon_table) as f:
table_lines = []
for line in f.readlines():
table_lines.append(line.rstrip())
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
record = seq_records[0]
# print record
prot = record.seq
# prot = 'MVSKGEEDNMAIIKEFMRFKVHMEGSVNGHEFEIEGEGEGRPYEGTQTAKLKVTKGGPLPFAWDILSPQFMYGSKAYVKHPADIPDYLKLSFPEGFKWERVMNFEDGGVVTVTQDSSLQDGEFIYKVKLRGTNFPSDGPVMQKKTMGWEASSERMYPEDGALKGEIKQRLKLKDGGHYDAEVKTTYKAKKPVQLPGAYNVNIKLDITSHNEDYTIVEQYERAEGRHSTGGMDELYK'
table = "".join(table_lines)
# table = 'UUU: 0.55; UCU: 0.85; UAU: 0.40; UGU: 0.44; UUC: 1.45; UCC: 1.41; UAC: 1.60; UGC: 1.56; UUA: 0.07; UCA: 0.51; UAA: 1.04; UGA: 1.06; UUG: 0.55; UCG: 1.36; UAG: 0.90; UGG: 1.00; CUU: 0.84; CCU: 0.93; CAU: 0.50; CGU: 0.97; CUC: 2.49; CCC: 1.66; CAC: 1.50; CGC: 2.45; CUA: 0.23; CCA: 0.53; CAA: 0.50; CGA: 0.75; CUG: 1.81; CCG: 0.89; CAG: 1.50; CGG: 0.71; AUU: 0.95; ACU: 0.58; AAU: 0.37; AGU: 0.39; AUC: 1.91; ACC: 1.62; AAC: 1.63; AGC: 1.49; AUA: 0.14; ACA: 0.58; AAA: 0.26; AGA: 0.36; AUG: 1.00; ACG: 1.22; AAG: 1.74; AGG: 0.76; GUU: 0.73; GCU: 0.80; GAU: 0.61; GGU: 0.91; GUC: 2.20; GCC: 1.98; GAC: 1.39; GGC: 2.32; GUA: 0.18; GCA: 0.44; GAA: 0.48; GGA: 0.46; GUG: 0.88; GCG: 0.77; GAG: 1.52; GGG: 0.31'
vd_table_obj = CodonTab_obj()
vd_table_obj.add_table(table)
# for k in vd_table_obj.weighting_dict.keys():
# print(vd_table_obj.weighting_dict[k][0].weightings)
# print(prot)
#-----------------------------------------------------
# Step X
# Optimise codons - random weightings
#-----------------------------------------------------
print("randomised codons:")
new_cds = optimise_rand(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - optimum codons
#-----------------------------------------------------
print("optimum sequence:")
new_cds = optimise_best(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - worst codons
#-----------------------------------------------------
print("worst sequence:")
new_cds = optimise_worst(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Score 1000 sequences for optimisation scores
#-----------------------------------------------------
score_list = []
cds_list = []
f = open("_".join([prefix, "1000_seqs.fa"]), "w+")
for i in range(0, 1000, 1):
new_cds = optimise_rand(prot)
seq_score, max = score_seq(new_cds, vd_table_obj)
# print seq_score
cds_list.append(new_cds)
score_list.append(str(round(seq_score, 2)))
	f.write(">cds_" + str(i) + "_" + str(seq_score) + "\n")
	f.write(new_cds + "\n")
f.close()
f = open("_".join([prefix, "1000_scores.tsv"]), "w+")
f.write("\n".join(score_list))
f.close()
midpoint_score = sorted(score_list)[500]
sorted_cds = [x for _,x in sorted(zip(score_list,cds_list))]
midpoint_cds = sorted_cds[500]
print("midpoint sequence:")
print(midpoint_score)
print(midpoint_cds)
#-----------------------------------------------------
# Step X
# Score the pre-optimised sequence
#-----------------------------------------------------
print("Score of the pre-optimised sequence:")
for record in cds_records:
	print(record.id)
old_cds = str(record.seq)
old_cds = old_cds.replace('T', 'U')
# print old_cds
seq_score, max = score_seq(old_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
# print(score_list)
# #set matplotlib to use a backend suitable for headless operation
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#
# plt.hist(score_list, bins='auto')
# out='tmp.png'
# plt.savefig(out, dpi=300, bbox_inches='tight')
# rng = np.random.RandomState(10) # deterministic random data
# a = np.hstack((rng.normal(size=1000),
# rng.normal(loc=5, scale=2, size=1000)))
| """
"""
def __init__(self):
"""Return a Expression_obj whose name is *gene_id*"""
# self.organism = []
self.weighting_dict = defaultdict(list)
# self.codon_obj_dict = {}
self.codon_dict = {
'UUU':'F','UUC':'F',
'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',
'AUU':'I','AUC':'I','AUA':'I',
'AUG':'M',
'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',
'UCU':'S','UCC':'S','UCA':'S','UCG':'S',
'CCU':'P','CCC':'P','CCA':'P','CCG':'P',
'ACU':'T','ACC':'T','ACA':'T','ACG':'T',
'GCU':'A','GCC':'A','GCA':'A','GCG':'A',
'UAU':'Y','UAC':'Y',
'UAA':'X','UAG':'X',
'CAU':'H','CAC':'H',
'CAA':'Q','CAG':'Q',
'AAU':'N','AAC':'N',
'AAA':'K','AAG':'K',
'GAU':'D','GAC':'D',
'GAA':'E','GAG':'E',
'UGU':'C','UGC':'C',
'UGA':'X',
'UGG':'W',
'CGU':'R','CGC':'R','CGA':'R','CGG':'R',
'AGU':'S','AGC':'S',
'AGA':'R','AGG':'R',
'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'
}
def add_table(self, table):
""""""
table = table.replace(' ', '')
table_lines = table.split(';')
for line in table_lines:
split_line = line.split(':')
codon = split_line[0]
# print codon
weighting = split_line[1]
# print weighting
aa = self.codon_dict[codon]
if self.weighting_dict[aa] and self.weighting_dict[aa][0]:
obj = self.weighting_dict[aa][0]
# print obj.weightings
else:
obj = AA_weight_obj(aa)
obj.add_weight(codon, weighting)
self.weighting_dict[aa].append(obj)
for aa in self.weighting_dict.keys():
self.weighting_dict[aa][0].adjust_weight() | identifier_body |
score_codons.py | #!/usr/bin/python
'''
This script generates a codon optimised protein based upon a fasta protein
sequence and a table of relative codon usage.
'''
from sets import Set
import sys,argparse
from collections import defaultdict
import re
import numpy as np
import csv
import random
from Bio import SeqIO
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
#These commands use the argparse module to import files specified in the command line
ap = argparse.ArgumentParser()
ap.add_argument('--fasta_aa',required=True,type=str,help='protein sequence for conversion')
ap.add_argument('--fasta_cds',required=True,type=str,help='cds for conversion')
ap.add_argument('--codon_table',required=True,type=str,help='text file containing codon usage table')
ap.add_argument('--prefix',required=True,type=str,help='output directory/filename prefix for output files')
conf = ap.parse_args()
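# Example invocation (the file names are hypothetical placeholders, shown only
# to illustrate the arguments defined above):
#   python score_codons.py --fasta_aa protein.fa --fasta_cds cds.fa \
#       --codon_table codon_usage.txt --prefix my_run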
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
class AA_weight_obj(object):
"""
"""
def __init__(self, aa):
""" """
self.aa = aa
self.weightings = defaultdict(float)
self.weightings_adj = defaultdict(float)
self.max = float()
self.optimal = str()
self.codons = []
self.sorted_adj_weightings = []
self.sorted_codons = []
self.weight_list = []
self.weight_list_adj = []
def add_weight(self, codon, weight):
""" """
# print codon
# print weight
self.weightings[codon] = float(weight)
# if float(weight) > self.max:
# self.max = float(weight)
# self.optimal = codon
self.codons.append(codon)
self.weight_list.append(weight)
def random_codon(self):
""" """
num_codons = len(self.codons)
r = float(random.randrange(0,10000, 1))
# r = float(random.randrange(0,num_codons*100, 1))
# print (self.aa)
# print(r)
r = np.divide(r, 10000)
# r = np.divide(r, 100)
# print(" of max ".join([str(r), str(num_codons)]))
		for x,y in zip(self.sorted_codons,self.sorted_adj_weightings):
# print(" - ".join([str(r), str(x), str(y)]))
selected_codon = x
if float(y) >= float(r):
break
else:
r = r - float(y)
return selected_codon
def get_opt(self):
""" """
# sorted_weightings = sorted(self.weight_list)
# sorted_codons = [x for _,x in sorted(zip(self.weight_list,self.codons))]
# print sorted_weightings
# print sorted_codons
# return sorted_codons[-1]
return self.sorted_codons[-1]
def adjust_weight(self):
""" """
num_codons = len(self.weight_list)
# print num_codons
# print(self.weight_list)
self.weight_list_adj = [round(np.divide(float(x), num_codons),5) for x in self.weight_list]
# print self.weight_list_adj
self.sorted_adj_weightings = sorted(self.weight_list_adj)
self.sorted_codons = [x for _,x in sorted(zip(self.weight_list_adj,self.codons))]
for x,y in zip(self.sorted_codons, self.sorted_adj_weightings):
self.weightings_adj[x] = y
self.max = self.sorted_adj_weightings[-1]
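	# Worked example of adjust_weight above (illustrative numbers only): an
	# amino acid with four codons and raw weights [0.2, 0.6, 1.2, 2.0] is
	# rescaled by dividing each weight by 4, giving [0.05, 0.15, 0.3, 0.5];
	# these adjusted weights sum to 1 here, so random_codon can treat them as
	# selection probabilities, and self.max becomes 0.5.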
class CodonTab_obj(object):
"""
"""
def __init__(self):
"""Return a Expression_obj whose name is *gene_id*"""
# self.organism = []
self.weighting_dict = defaultdict(list)
# self.codon_obj_dict = {}
self.codon_dict = {
'UUU':'F','UUC':'F',
'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',
'AUU':'I','AUC':'I','AUA':'I',
'AUG':'M',
'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',
'UCU':'S','UCC':'S','UCA':'S','UCG':'S',
'CCU':'P','CCC':'P','CCA':'P','CCG':'P',
'ACU':'T','ACC':'T','ACA':'T','ACG':'T',
'GCU':'A','GCC':'A','GCA':'A','GCG':'A',
'UAU':'Y','UAC':'Y',
'UAA':'X','UAG':'X',
'CAU':'H','CAC':'H',
'CAA':'Q','CAG':'Q',
'AAU':'N','AAC':'N',
'AAA':'K','AAG':'K',
'GAU':'D','GAC':'D',
'GAA':'E','GAG':'E',
'UGU':'C','UGC':'C',
'UGA':'X',
'UGG':'W',
'CGU':'R','CGC':'R','CGA':'R','CGG':'R',
'AGU':'S','AGC':'S',
'AGA':'R','AGG':'R',
'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'
}
def add_table(self, table):
""""""
table = table.replace(' ', '')
table_lines = table.split(';')
for line in table_lines:
split_line = line.split(':')
codon = split_line[0]
# print codon
weighting = split_line[1]
# print weighting
aa = self.codon_dict[codon]
if self.weighting_dict[aa] and self.weighting_dict[aa][0]:
obj = self.weighting_dict[aa][0]
# print obj.weightings
else:
obj = AA_weight_obj(aa)
obj.add_weight(codon, weighting)
self.weighting_dict[aa].append(obj)
for aa in self.weighting_dict.keys():
self.weighting_dict[aa][0].adjust_weight()
def optimise_rand(prot):
new_seq = ''
for aa in prot:
new_aa = vd_table_obj.weighting_dict[aa][0].random_codon()
new_seq = new_seq + new_aa
return(new_seq)
def optimise_best(prot):
new_seq = ''
for aa in prot:
# print aa
# new_aa = vd_table_obj.weighting_dict[aa][0].get_opt()
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[-1]
new_seq = new_seq + new_aa
return(new_seq)
def optimise_worst(prot):
new_seq = ''
for aa in prot:
# print aa
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[0]
new_seq = new_seq + new_aa | total_score = float(0)
total_max = float(0)
for codon in codons:
aa = table_obj.codon_dict[codon]
score = table_obj.weighting_dict[aa][0].weightings_adj[codon]
# score = score - table_obj.weighting_dict[aa][0].weight_list_adj[0]
max = table_obj.weighting_dict[aa][0].max
total_score = total_score + score
total_max = total_max + max
return [round(np.divide(total_score, total_max), 2), round(np.divide(total_max, total_max), 2)]
# scores = []
# for aa in seq.split(''):
# scores.append(score_dict[aa])
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
seq_records = list(SeqIO.parse(conf.fasta_aa, "fasta"))
cds_records = list(SeqIO.parse(conf.fasta_cds, "fasta"))
prefix = conf.prefix
with open(conf.codon_table) as f:
table_lines = []
for line in f.readlines():
table_lines.append(line.rstrip())
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
record = seq_records[0]
# print record
prot = record.seq
# prot = 'MVSKGEEDNMAIIKEFMRFKVHMEGSVNGHEFEIEGEGEGRPYEGTQTAKLKVTKGGPLPFAWDILSPQFMYGSKAYVKHPADIPDYLKLSFPEGFKWERVMNFEDGGVVTVTQDSSLQDGEFIYKVKLRGTNFPSDGPVMQKKTMGWEASSERMYPEDGALKGEIKQRLKLKDGGHYDAEVKTTYKAKKPVQLPGAYNVNIKLDITSHNEDYTIVEQYERAEGRHSTGGMDELYK'
table = "".join(table_lines)
# table = 'UUU: 0.55; UCU: 0.85; UAU: 0.40; UGU: 0.44; UUC: 1.45; UCC: 1.41; UAC: 1.60; UGC: 1.56; UUA: 0.07; UCA: 0.51; UAA: 1.04; UGA: 1.06; UUG: 0.55; UCG: 1.36; UAG: 0.90; UGG: 1.00; CUU: 0.84; CCU: 0.93; CAU: 0.50; CGU: 0.97; CUC: 2.49; CCC: 1.66; CAC: 1.50; CGC: 2.45; CUA: 0.23; CCA: 0.53; CAA: 0.50; CGA: 0.75; CUG: 1.81; CCG: 0.89; CAG: 1.50; CGG: 0.71; AUU: 0.95; ACU: 0.58; AAU: 0.37; AGU: 0.39; AUC: 1.91; ACC: 1.62; AAC: 1.63; AGC: 1.49; AUA: 0.14; ACA: 0.58; AAA: 0.26; AGA: 0.36; AUG: 1.00; ACG: 1.22; AAG: 1.74; AGG: 0.76; GUU: 0.73; GCU: 0.80; GAU: 0.61; GGU: 0.91; GUC: 2.20; GCC: 1.98; GAC: 1.39; GGC: 2.32; GUA: 0.18; GCA: 0.44; GAA: 0.48; GGA: 0.46; GUG: 0.88; GCG: 0.77; GAG: 1.52; GGG: 0.31'
vd_table_obj = CodonTab_obj()
vd_table_obj.add_table(table)
# for k in vd_table_obj.weighting_dict.keys():
# print(vd_table_obj.weighting_dict[k][0].weightings)
# print(prot)
#-----------------------------------------------------
# Step X
# Optimise codons - random weightings
#-----------------------------------------------------
print("randomised codons:")
new_cds = optimise_rand(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - optimum codons
#-----------------------------------------------------
print("optimum sequence:")
new_cds = optimise_best(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - worst codons
#-----------------------------------------------------
print("worst sequence:")
new_cds = optimise_worst(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Score 1000 sequences for optimisation scores
#-----------------------------------------------------
score_list = []
cds_list = []
f = open("_".join([prefix, "1000_seqs.fa"]), "w+")
for i in range(0, 1000, 1):
new_cds = optimise_rand(prot)
seq_score, max = score_seq(new_cds, vd_table_obj)
# print seq_score
cds_list.append(new_cds)
score_list.append(str(round(seq_score, 2)))
	f.write(">cds_" + str(i) + "_" + str(seq_score) + "\n")
	f.write(new_cds + "\n")
f.close()
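# Each record written to the FASTA file above has the form (values are
# illustrative only):
#   >cds_0_0.83
#   AUGGCUUCC...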
f = open("_".join([prefix, "1000_scores.tsv"]), "w+")
f.write("\n".join(score_list))
f.close()
midpoint_score = sorted(score_list)[500]
sorted_cds = [x for _,x in sorted(zip(score_list,cds_list))]
midpoint_cds = sorted_cds[500]
print("midpoint sequence:")
print(midpoint_score)
print(midpoint_cds)
#-----------------------------------------------------
# Step X
# Score the pre-optimised sequence
#-----------------------------------------------------
print("Score of the pre-optimised sequence:")
for record in cds_records:
	print(record.id)
old_cds = str(record.seq)
old_cds = old_cds.replace('T', 'U')
# print old_cds
seq_score, max = score_seq(old_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
# print(score_list)
# #set matplotlib to use a backend suitable for headless operation
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#
# plt.hist(score_list, bins='auto')
# out='tmp.png'
# plt.savefig(out, dpi=300, bbox_inches='tight')
# rng = np.random.RandomState(10) # deterministic random data
# a = np.hstack((rng.normal(size=1000),
# rng.normal(loc=5, scale=2, size=1000))) | return(new_seq)
def score_seq(seq, table_obj):
codons = [seq[i:i+3] for i in range(0, len(seq), 3)] | random_line_split |
score_codons.py | #!/usr/bin/python
'''
This script generates a codon optimised protein based upon a fasta protein
sequence and a table of relative codon usage.
'''
from sets import Set
import sys,argparse
from collections import defaultdict
import re
import numpy as np
import csv
import random
from Bio import SeqIO
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
#These commands use the argparse module to import files specified in the command line
ap = argparse.ArgumentParser()
ap.add_argument('--fasta_aa',required=True,type=str,help='protein sequence for conversion')
ap.add_argument('--fasta_cds',required=True,type=str,help='cds for conversion')
ap.add_argument('--codon_table',required=True,type=str,help='text file containing codon usage table')
ap.add_argument('--prefix',required=True,type=str,help='output directory/filename prefix for output files')
conf = ap.parse_args()
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
class AA_weight_obj(object):
"""
"""
def __init__(self, aa):
""" """
self.aa = aa
self.weightings = defaultdict(float)
self.weightings_adj = defaultdict(float)
self.max = float()
self.optimal = str()
self.codons = []
self.sorted_adj_weightings = []
self.sorted_codons = []
self.weight_list = []
self.weight_list_adj = []
def add_weight(self, codon, weight):
""" """
# print codon
# print weight
self.weightings[codon] = float(weight)
# if float(weight) > self.max:
# self.max = float(weight)
# self.optimal = codon
self.codons.append(codon)
self.weight_list.append(weight)
def random_codon(self):
""" """
num_codons = len(self.codons)
r = float(random.randrange(0,10000, 1))
# r = float(random.randrange(0,num_codons*100, 1))
# print (self.aa)
# print(r)
r = np.divide(r, 10000)
# r = np.divide(r, 100)
# print(" of max ".join([str(r), str(num_codons)]))
		for x,y in zip(self.sorted_codons,self.sorted_adj_weightings):
# print(" - ".join([str(r), str(x), str(y)]))
selected_codon = x
if float(y) >= float(r):
break
else:
r = r - float(y)
return selected_codon
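	# Illustrative walk-through of the sampling loop in random_codon (made-up
	# numbers): with sorted adjusted weights [0.1, 0.3, 0.6] and r = 0.35, the
	# first codon is skipped (0.1 < 0.35, so r becomes 0.25) and the second
	# codon is returned because 0.3 >= 0.25; codons are therefore drawn roughly
	# in proportion to their adjusted weights.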
def get_opt(self):
""" """
# sorted_weightings = sorted(self.weight_list)
# sorted_codons = [x for _,x in sorted(zip(self.weight_list,self.codons))]
# print sorted_weightings
# print sorted_codons
# return sorted_codons[-1]
return self.sorted_codons[-1]
def adjust_weight(self):
""" """
num_codons = len(self.weight_list)
# print num_codons
# print(self.weight_list)
self.weight_list_adj = [round(np.divide(float(x), num_codons),5) for x in self.weight_list]
# print self.weight_list_adj
self.sorted_adj_weightings = sorted(self.weight_list_adj)
self.sorted_codons = [x for _,x in sorted(zip(self.weight_list_adj,self.codons))]
for x,y in zip(self.sorted_codons, self.sorted_adj_weightings):
self.weightings_adj[x] = y
self.max = self.sorted_adj_weightings[-1]
class CodonTab_obj(object):
"""
"""
def __init__(self):
"""Return a Expression_obj whose name is *gene_id*"""
# self.organism = []
self.weighting_dict = defaultdict(list)
# self.codon_obj_dict = {}
self.codon_dict = {
'UUU':'F','UUC':'F',
'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',
'AUU':'I','AUC':'I','AUA':'I',
'AUG':'M',
'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',
'UCU':'S','UCC':'S','UCA':'S','UCG':'S',
'CCU':'P','CCC':'P','CCA':'P','CCG':'P',
'ACU':'T','ACC':'T','ACA':'T','ACG':'T',
'GCU':'A','GCC':'A','GCA':'A','GCG':'A',
'UAU':'Y','UAC':'Y',
'UAA':'X','UAG':'X',
'CAU':'H','CAC':'H',
'CAA':'Q','CAG':'Q',
'AAU':'N','AAC':'N',
'AAA':'K','AAG':'K',
'GAU':'D','GAC':'D',
'GAA':'E','GAG':'E',
'UGU':'C','UGC':'C',
'UGA':'X',
'UGG':'W',
'CGU':'R','CGC':'R','CGA':'R','CGG':'R',
'AGU':'S','AGC':'S',
'AGA':'R','AGG':'R',
'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'
}
def add_table(self, table):
""""""
table = table.replace(' ', '')
table_lines = table.split(';')
for line in table_lines:
split_line = line.split(':')
codon = split_line[0]
# print codon
weighting = split_line[1]
# print weighting
aa = self.codon_dict[codon]
if self.weighting_dict[aa] and self.weighting_dict[aa][0]:
obj = self.weighting_dict[aa][0]
# print obj.weightings
else:
obj = AA_weight_obj(aa)
obj.add_weight(codon, weighting)
self.weighting_dict[aa].append(obj)
for aa in self.weighting_dict.keys():
self.weighting_dict[aa][0].adjust_weight()
def optimise_rand(prot):
new_seq = ''
for aa in prot:
new_aa = vd_table_obj.weighting_dict[aa][0].random_codon()
new_seq = new_seq + new_aa
return(new_seq)
def optimise_best(prot):
new_seq = ''
for aa in prot:
# print aa
# new_aa = vd_table_obj.weighting_dict[aa][0].get_opt()
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[-1]
new_seq = new_seq + new_aa
return(new_seq)
def optimise_worst(prot):
new_seq = ''
for aa in prot:
# print aa
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[0]
new_seq = new_seq + new_aa
return(new_seq)
def score_seq(seq, table_obj):
codons = [seq[i:i+3] for i in range(0, len(seq), 3)]
total_score = float(0)
total_max = float(0)
for codon in codons:
aa = table_obj.codon_dict[codon]
score = table_obj.weighting_dict[aa][0].weightings_adj[codon]
# score = score - table_obj.weighting_dict[aa][0].weight_list_adj[0]
max = table_obj.weighting_dict[aa][0].max
total_score = total_score + score
total_max = total_max + max
return [round(np.divide(total_score, total_max), 2), round(np.divide(total_max, total_max), 2)]
# scores = []
# for aa in seq.split(''):
# scores.append(score_dict[aa])
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
seq_records = list(SeqIO.parse(conf.fasta_aa, "fasta"))
cds_records = list(SeqIO.parse(conf.fasta_cds, "fasta"))
prefix = conf.prefix
with open(conf.codon_table) as f:
table_lines = []
for line in f.readlines():
table_lines.append(line.rstrip())
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
record = seq_records[0]
# print record
prot = record.seq
# prot = 'MVSKGEEDNMAIIKEFMRFKVHMEGSVNGHEFEIEGEGEGRPYEGTQTAKLKVTKGGPLPFAWDILSPQFMYGSKAYVKHPADIPDYLKLSFPEGFKWERVMNFEDGGVVTVTQDSSLQDGEFIYKVKLRGTNFPSDGPVMQKKTMGWEASSERMYPEDGALKGEIKQRLKLKDGGHYDAEVKTTYKAKKPVQLPGAYNVNIKLDITSHNEDYTIVEQYERAEGRHSTGGMDELYK'
table = "".join(table_lines)
# table = 'UUU: 0.55; UCU: 0.85; UAU: 0.40; UGU: 0.44; UUC: 1.45; UCC: 1.41; UAC: 1.60; UGC: 1.56; UUA: 0.07; UCA: 0.51; UAA: 1.04; UGA: 1.06; UUG: 0.55; UCG: 1.36; UAG: 0.90; UGG: 1.00; CUU: 0.84; CCU: 0.93; CAU: 0.50; CGU: 0.97; CUC: 2.49; CCC: 1.66; CAC: 1.50; CGC: 2.45; CUA: 0.23; CCA: 0.53; CAA: 0.50; CGA: 0.75; CUG: 1.81; CCG: 0.89; CAG: 1.50; CGG: 0.71; AUU: 0.95; ACU: 0.58; AAU: 0.37; AGU: 0.39; AUC: 1.91; ACC: 1.62; AAC: 1.63; AGC: 1.49; AUA: 0.14; ACA: 0.58; AAA: 0.26; AGA: 0.36; AUG: 1.00; ACG: 1.22; AAG: 1.74; AGG: 0.76; GUU: 0.73; GCU: 0.80; GAU: 0.61; GGU: 0.91; GUC: 2.20; GCC: 1.98; GAC: 1.39; GGC: 2.32; GUA: 0.18; GCA: 0.44; GAA: 0.48; GGA: 0.46; GUG: 0.88; GCG: 0.77; GAG: 1.52; GGG: 0.31'
vd_table_obj = CodonTab_obj()
vd_table_obj.add_table(table)
# for k in vd_table_obj.weighting_dict.keys():
# print(vd_table_obj.weighting_dict[k][0].weightings)
# print(prot)
#-----------------------------------------------------
# Step X
# Optimise codons - random weightings
#-----------------------------------------------------
print("randomised codons:")
new_cds = optimise_rand(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - optimum codons
#-----------------------------------------------------
print("optimum sequence:")
new_cds = optimise_best(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - worst codons
#-----------------------------------------------------
print("worst sequence:")
new_cds = optimise_worst(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Score 1000 sequences for optimisation scores
#-----------------------------------------------------
score_list = []
cds_list = []
f = open("_".join([prefix, "1000_seqs.fa"]), "w+")
for i in range(0, 1000, 1):
|
f.close()
f = open("_".join([prefix, "1000_scores.tsv"]), "w+")
f.write("\n".join(score_list))
f.close()
midpoint_score = sorted(score_list)[500]
sorted_cds = [x for _,x in sorted(zip(score_list,cds_list))]
midpoint_cds = sorted_cds[500]
print("midpoint sequence:")
print(midpoint_score)
print(midpoint_cds)
#-----------------------------------------------------
# Step X
# Score the pre-optimised sequence
#-----------------------------------------------------
print("Score of the pre-optimised sequence:")
for record in cds_records:
	print(record.id)
old_cds = str(record.seq)
old_cds = old_cds.replace('T', 'U')
# print old_cds
seq_score, max = score_seq(old_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
# print(score_list)
# #set matplotlib to use a backend suitable for headless operation
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#
# plt.hist(score_list, bins='auto')
# out='tmp.png'
# plt.savefig(out, dpi=300, bbox_inches='tight')
# rng = np.random.RandomState(10) # deterministic random data
# a = np.hstack((rng.normal(size=1000),
# rng.normal(loc=5, scale=2, size=1000)))
| new_cds = optimise_rand(prot)
seq_score, max = score_seq(new_cds, vd_table_obj)
# print seq_score
cds_list.append(new_cds)
score_list.append(str(round(seq_score, 2)))
	f.write(">cds_" + str(i) + "_" + str(seq_score) + "\n")
	f.write(new_cds + "\n") | conditional_block
easyPresentation.py | # !/usr/bin/python
# This tool converts a text file into an HTML page that looks like a presentation.
# -*- coding: utf-8 -*-
# Version 1.0 05/01/2015
# ***************************************************************************
# * Copyright (C) 2015, Varun Srinivas Chakravarthi Nalluri *
# * *
# * This program is free software; any one can redistribute it and/or *
# * modify it under the terms of the GNU General Public License as *
# * published by the Free Software Foundation; either version 2 of the *
# * License, or (at your option) any later version. * *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
# * *
# * Purpose : This program accepts a text file and few arguments as *
# * input and generates a output folder, which contains a html file and *
# * its supporting files like js, css etc. This html file contains the *
# * presentation in the form of html content, which can be opened with *
# * any browser. *
# * *
# ***************************************************************************
import os
import sys
import getopt
import codecs
import shutil
import errno
import markdown
import tempfile
from subprocess import Popen, PIPE
def main(argv):
template_id = 1
custom_markdown_args = ""
path = "output" + os.path.sep
input_file_name = ""
input_textfile_path = ""
out_file_name = "easyPresentation.html"
argc = len(argv)
# checking if minimum number of arguments required passed
if argc < 1:
		print ("Invalid number of arguments passed\n")
print ("Please use -h or --help for help")
sys.exit(2)
# Reading path of program being run
meta_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.sep
# Reading passed arguments
try:
opts, args = getopt.getopt(argv, 'h:o:t:f:m:d', ["help", "outpath=","template=","filename=","markdown="])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(args) > 0:
input_file_name = args[0]
else:
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt in ('-m',"--markdown"):
# (kp) use consistent space around operators. if '=' has spaces around it, so should '+'
custom_markdown_args = arg
print("Custom markdown call identified...")
elif opt in ('-o',"--outpath"):
|
elif opt in ('-t',"--template"):
template_id = arg
elif opt in ('-f',"--filename"):
out_file_name = arg
else:
		print ('unhandled option %s' % opt)
# checking if non optional arguments are passed.
if input_file_name == "":
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
if not path.endswith(os.path.sep):
path = path + os.path.sep
input_textfile_path = os.path.dirname(os.path.abspath(input_file_name)) + os.path.sep
# opening input txt file
f = open(input_file_name,"r")
# Loading template configuration
templateConfig = load_config(template_id,meta_path)
# reqChangePath(path)
# copying all the required files to output path specified
path = copy_files(meta_path + templateConfig["Template path"], path, templateConfig["Template name"])
if templateConfig == {}:
print ("SYS ERR :: INVALID TEMPLATE ID")
sys.exit(1)
# reading the template file
template = open (meta_path + templateConfig["Template path"] + templateConfig["Template name"],"r")
htmlContent = template.read()
	# This is very important. This particular string should be present in the template; this is where all the generated div blocks need to be placed
htmlDataPart1, htmlDataPart2 = htmlContent.split('--slide content--', 1)
index = htmlDataPart1.find("</head>")
if index == -1:
index = htmlDataPart1.find("<body")
htmlDataPart1 = htmlDataPart1[:index] + " <link rel=\"stylesheet\" type=\"text/css\" href=\"css/slideCustomCSS.css\"> " + htmlDataPart1[index:]
template.close()
data = f.read()
# Formatting the input text and converting it to html
addStyles(data, path, input_textfile_path)
data = data[data.find("~slidestart"):]
data = convertTextToHtml(data, custom_markdown_args)
data = data.replace("~slidestart",templateConfig["current slide"],1)
data = data.replace("~slidestart",templateConfig["all slides"])
data = data.replace("~slideend","</div>")
data = data.replace("\~slidestart","~slidestart")
data = data.replace("\~slideend","~slideend")
output = convertTextToHtml(data, custom_markdown_args)
# writing all the changes to output file
output = htmlDataPart1 + output + htmlDataPart2
output_file = codecs.open(path+out_file_name, "w", encoding="utf-8", errors="xmlcharrefreplace")
output_file.write(output)
# Close the file
f.close()
# Opens a file
def open_file(file_name, mode):
file_content = None
try:
file_content = open(file_name, mode)
except (OSError, IOError) as e:
		print('Error occurred while opening file.\n Error: %s' % e)
return file_content
# print directions to use this tool
def usage():
print ("Usage: python3 easyPresentation.py [OPTIONS] <input_filename>")
print (" Note: All the optional arguments must be given before input text file.\n")
print ("-h, --help \t\t display this help and exit")
print ("-o, --outpath \t\t Copy all output files to specified path, \n\t\t\t if no path is specified by default 'outfolder' will be created in current directory")
	print ("-f, --filename \t\t change primary output file name")
print ("-t, --template \t\t select template")
print ("-m, --markdown \t\t specify custom markdown text to html conversion tool\n")
print ("Sorry for the mess, I'm still under development!!")
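# Example invocation matching the options printed above (all names here are
# hypothetical):
#   python3 easyPresentation.py -o slides_out -t 1 -f talk.html notes.txt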
# Identifying path-related special characters and resolving the absolute path. I am not sure if this is needed, but still keeping this for later use
def reqChangePath(path):
# (kp) this check is wrong. hidden files on unix filesystems start with '.' so startswith('.') is not a good check for current directory
# (kp) this is actually the same bug that Ken Thompson had in the original unix impl of the filesystem which lead to dotfiles being hidden in the first place
if path.startswith('.') or path.startswith('~'):
p = Popen("pwd", stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
if rc != 0:
print("Invalid cmd")
else:
			path = output.decode().strip() + path[1:]
if not os.path.exists(path):
os.makedirs(path)
# Read configuration of selected template
def load_config(tnum, file_loc):
flag = 0
templateConfig = {}
templet = open_file (file_loc+"config","r")
for line in templet :
if line.strip().startswith("Template id") and flag == 1 :
return templateConfig
if line.strip().startswith("Template id") and flag == 0 :
if int(tnum) == int(line.split(':',1)[1].strip()):
flag = 1
if flag == 1 and line.strip() != "":
key = line.split(':',1)[0].strip()
value = line.split(':',1)[1].strip()
if key == "current slide" or key == "all slides":
value = value[:value.find('>')] + " class=\"customSlideCSS\" " + ">"
# value = value + "<div class=\"customSlideCSS\">"
templateConfig[key] = value
return templateConfig
# Ignore specified files while copying
def ignore_function(ignore):
def _ignore_(path, names):
ignored_names = []
if ignore in names:
ignored_names.append(ignore)
return set(ignored_names)
return _ignore_
# Copy all files from source to destination
def copy_files (src, dest, tname):
try:
# Checking if user selected destination directory is already exists
while os.path.exists(dest):
			print ('destination directory '+dest+' already exists!!')
is_delete = input("Enter 'Y' to replace directory or 'N' to enter new destination path (Y/N): ")
if is_delete.upper() == 'Y':
shutil.rmtree(dest)
else:
dest = input('Enter new destination path to continue : ') # Reading new destination path
shutil.copytree(src, dest, ignore=ignore_function(tname))
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
			shutil.copy(src, dest)
else:
print('Directory not copied. Error: %s' % e)
return dest
# Creating CSS style class. Need to include CSS3 functionalities.
def addStyles(rawText, path, input_file_path):
preDefCSS = initStyles()
if not path.endswith(os.path.sep):
path = path + os.path.sep
if not os.path.exists(path + "css" + os.path.sep ):
os.makedirs(path + "css" + os.path.sep)
customCSS = open_file (path+"css" + os.path.sep + "slideCustomCSS.css",'w')
customCSS.write(".customSlideCSS { ")
styletext = rawText.split('~slidestart',1)[0]
if styletext != "":
styletext = styletext.replace("[","")
styletext = styletext.replace("]",";")
styles = styletext.split(";")
for style in styles:
if style.strip() != "":
key,value = style.split(':',1)
key = key.strip()
value = value.strip()
if key == "bgimage":
# Creating a folder inside css folder and copying back-ground image into that folder.
if not os.path.exists(path + "css" + os.path.sep + "bgimages" + os.path.sep):
						os.makedirs(path + "css" + os.path.sep + "bgimages" + os.path.sep)
shutil.copy(input_file_path+value, path + "css" + os.path.sep + "bgimage")
value = "url(\"bgimage\")"
customCSS.write("{0} : {1};".format(preDefCSS[key],value))
customCSS.write("}")
customCSS.close()
# Initiating basic styles defined for this tool. We can add attribute to below list and use it.
def initStyles():
preDefCSS = {}
preDefCSS["bgimage"] = "background-image"
preDefCSS["bgcolor"] = "background-color"
preDefCSS["bgimage-repeat"] = "background-repeat"
preDefCSS["text-color"] = "color"
preDefCSS["font"] = "font"
preDefCSS["font-size"] = "font-size"
preDefCSS["bgimage-size"] = "background-size"
preDefCSS["left-margin"] = "padding-left"
preDefCSS["top-margin"] = "padding-top"
preDefCSS["bottom-margin"] = "padding-bottom"
preDefCSS["right-margin"] = "padding-right"
return preDefCSS
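# Example of the optional style header that addStyles() reads from the top of
# the input text file (the values are hypothetical); every key must be one of
# the names defined in initStyles(), and everything before the first
# ~slidestart is treated as styling:
#   [bgcolor: #202020; text-color: white; font-size: 28px; bgimage: cover.png]
#   ~slidestart
#   # First slide title
#   ~slideend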
# Converts markdown text to html text
def convertTextToHtml(data, custom_markdown_args):
output = ""
if custom_markdown_args == "":
output = markdown.markdown(data) # Using imported python markdown
else:
if ' ' in custom_markdown_args:
			markdown_cmd = custom_markdown_args.split(' ', 1)[0] # Using custom markdown argument passed through command line
markdown_arguments = custom_markdown_args.split(' ', 1)[1] + " "
else:
markdown_cmd = custom_markdown_args
markdown_arguments = " "
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(bytes(data,'UTF-8'))
temp.flush()
print("markdown_cmd : %s"% markdown_cmd, "markdown_args : %s"% markdown_arguments)
			p = Popen([markdown_cmd, markdown_arguments+temp.name], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
temp.close()
os.unlink(temp.name)
if rc != 0:
print (" Invalid markdown script!")
if err != "":
print(" Error while running markdown script : ")
print(err)
sys.exit(1)
else:
output = output.decode('utf-8')
return output
# begin main()
if __name__ == "__main__":
main(sys.argv[1:])
| path = arg
# reqChangePath(arg) | conditional_block |
easyPresentation.py | # !/usr/bin/python
# This tool converts a text file into an HTML page that looks like a presentation.
# -*- coding: utf-8 -*-
# Version 1.0 05/01/2015
# ***************************************************************************
# * Copyright (C) 2015, Varun Srinivas Chakravarthi Nalluri *
# * *
# * This program is free software; any one can redistribute it and/or *
# * modify it under the terms of the GNU General Public License as *
# * published by the Free Software Foundation; either version 2 of the *
# * License, or (at your option) any later version. * *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
# * *
# * Purpose : This program accepts a text file and few arguments as *
# * input and generates a output folder, which contains a html file and *
# * its supporting files like js, css etc. This html file contains the *
# * presentation in the form of html content, which can be opened with *
# * any browser. *
# * *
# ***************************************************************************
import os
import sys
import getopt
import codecs
import shutil
import errno
import markdown
import tempfile
from subprocess import Popen, PIPE
def main(argv):
template_id = 1
custom_markdown_args = ""
path = "output" + os.path.sep
input_file_name = ""
input_textfile_path = ""
out_file_name = "easyPresentation.html"
argc = len(argv)
# checking if minimum number of arguments required passed
if argc < 1:
		print ("Invalid number of arguments passed\n")
print ("Please use -h or --help for help")
sys.exit(2)
# Reading path of program being run
meta_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.sep
# Reading passed arguments
try:
opts, args = getopt.getopt(argv, 'h:o:t:f:m:d', ["help", "outpath=","template=","filename=","markdown="])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(args) > 0:
input_file_name = args[0]
else:
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt in ('-m',"--markdown"):
# (kp) use consistent space around operators. if '=' has spaces around it, so should '+'
custom_markdown_args = arg
print("Custom markdown call identified...")
elif opt in ('-o',"--outpath"):
path = arg
# reqChangePath(arg)
elif opt in ('-t',"--template"):
template_id = arg
elif opt in ('-f',"--filename"):
out_file_name = arg
else:
		print ('unhandled option %s' % opt)
# checking if non optional arguments are passed.
if input_file_name == "":
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
if not path.endswith(os.path.sep):
path = path + os.path.sep
input_textfile_path = os.path.dirname(os.path.abspath(input_file_name)) + os.path.sep
# opening input txt file
f = open(input_file_name,"r")
# Loading template configuration
templateConfig = load_config(template_id,meta_path)
# reqChangePath(path)
# copying all the required files to output path specified
path = copy_files(meta_path + templateConfig["Template path"], path, templateConfig["Template name"])
if templateConfig == {}:
print ("SYS ERR :: INVALID TEMPLATE ID")
sys.exit(1)
# reading the template file
template = open (meta_path + templateConfig["Template path"] + templateConfig["Template name"],"r")
htmlContent = template.read()
	# This is very important. This particular string should be present in the template; this is where all the generated div blocks need to be placed
htmlDataPart1, htmlDataPart2 = htmlContent.split('--slide content--', 1)
index = htmlDataPart1.find("</head>")
if index == -1:
index = htmlDataPart1.find("<body")
htmlDataPart1 = htmlDataPart1[:index] + " <link rel=\"stylesheet\" type=\"text/css\" href=\"css/slideCustomCSS.css\"> " + htmlDataPart1[index:]
template.close()
data = f.read()
# Formatting the input text and converting it to html
addStyles(data, path, input_textfile_path)
data = data[data.find("~slidestart"):]
data = convertTextToHtml(data, custom_markdown_args)
data = data.replace("~slidestart",templateConfig["current slide"],1)
data = data.replace("~slidestart",templateConfig["all slides"])
data = data.replace("~slideend","</div>")
data = data.replace("\~slidestart","~slidestart")
data = data.replace("\~slideend","~slideend")
output = convertTextToHtml(data, custom_markdown_args)
# writing all the changes to output file
output = htmlDataPart1 + output + htmlDataPart2
output_file = codecs.open(path+out_file_name, "w", encoding="utf-8", errors="xmlcharrefreplace")
output_file.write(output)
# Close the file
f.close()
# Opens a file
def open_file(file_name, mode):
file_content = None
try:
file_content = open(file_name, mode)
except (OSError, IOError) as e:
		print('Error occurred while opening file.\n Error: %s' % e)
return file_content
# print directions to use this tool
def usage():
print ("Usage: python3 easyPresentation.py [OPTIONS] <input_filename>")
print (" Note: All the optional arguments must be given before input text file.\n")
print ("-h, --help \t\t display this help and exit")
print ("-o, --outpath \t\t Copy all output files to specified path, \n\t\t\t if no path is specified by default 'outfolder' will be created in current directory")
	print ("-f, --filename \t\t change primary output file name")
print ("-t, --template \t\t select template")
print ("-m, --markdown \t\t specify custom markdown text to html conversion tool\n")
print ("Sorry for the mess, I'm still under development!!")
# Identifying path-related special characters and resolving the absolute path. I am not sure if this is needed, but still keeping this for later use
def reqChangePath(path):
# (kp) this check is wrong. hidden files on unix filesystems start with '.' so startswith('.') is not a good check for current directory
# (kp) this is actually the same bug that Ken Thompson had in the original unix impl of the filesystem which lead to dotfiles being hidden in the first place
if path.startswith('.') or path.startswith('~'):
p = Popen("pwd", stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
if rc != 0:
print("Invalid cmd")
else:
			path = output.decode().strip() + path[1:]
if not os.path.exists(path):
os.makedirs(path)
# Read configuration of selected template
def load_config(tnum, file_loc):
flag = 0
templateConfig = {}
templet = open_file (file_loc+"config","r")
for line in templet :
if line.strip().startswith("Template id") and flag == 1 :
return templateConfig
if line.strip().startswith("Template id") and flag == 0 :
if int(tnum) == int(line.split(':',1)[1].strip()):
flag = 1
if flag == 1 and line.strip() != "":
key = line.split(':',1)[0].strip()
value = line.split(':',1)[1].strip()
if key == "current slide" or key == "all slides":
value = value[:value.find('>')] + " class=\"customSlideCSS\" " + ">"
# value = value + "<div class=\"customSlideCSS\">"
templateConfig[key] = value
return templateConfig
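# Example of a config block that load_config() can parse (the paths and markup
# here are hypothetical); reading starts at the matching "Template id" line and
# every "key : value" pair is stored until the next template block begins:
#   Template id : 1
#   Template name : template.html
#   Template path : templates/basic/
#   current slide : <div id="currentSlide">
#   all slides : <div class="slide">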
# Ignore specified files while copying
def ignore_function(ignore):
def _ignore_(path, names):
ignored_names = []
if ignore in names:
ignored_names.append(ignore)
return set(ignored_names)
return _ignore_
# Copy all files from source to destination
def copy_files (src, dest, tname):
try:
# Checking if user selected destination directory is already exists
while os.path.exists(dest):
			print ('destination directory '+dest+' already exists!!')
is_delete = input("Enter 'Y' to replace directory or 'N' to enter new destination path (Y/N): ")
if is_delete.upper() == 'Y':
shutil.rmtree(dest)
else:
dest = input('Enter new destination path to continue : ') # Reading new destination path
shutil.copytree(src, dest, ignore=ignore_function(tname))
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
			shutil.copy(src, dest)
else:
print('Directory not copied. Error: %s' % e)
return dest
# Creating CSS style class. Need to include CSS3 functionalities.
def addStyles(rawText, path, input_file_path):
preDefCSS = initStyles()
if not path.endswith(os.path.sep):
path = path + os.path.sep
if not os.path.exists(path + "css" + os.path.sep ):
os.makedirs(path + "css" + os.path.sep)
customCSS = open_file (path+"css" + os.path.sep + "slideCustomCSS.css",'w')
customCSS.write(".customSlideCSS { ")
styletext = rawText.split('~slidestart',1)[0]
if styletext != "":
styletext = styletext.replace("[","")
styletext = styletext.replace("]",";")
styles = styletext.split(";")
for style in styles:
if style.strip() != "":
key,value = style.split(':',1)
key = key.strip()
value = value.strip()
if key == "bgimage":
# Creating a folder inside css folder and copying back-ground image into that folder.
if not os.path.exists(path + "css" + os.path.sep + "bgimages" + os.path.sep):
						os.makedirs(path + "css" + os.path.sep + "bgimages" + os.path.sep)
shutil.copy(input_file_path+value, path + "css" + os.path.sep + "bgimage")
value = "url(\"bgimage\")"
customCSS.write("{0} : {1};".format(preDefCSS[key],value))
customCSS.write("}")
customCSS.close()
# Initiating basic styles defined for this tool. We can add attribute to below list and use it.
def initStyles():
|
# Converts markdown text to html text
def convertTextToHtml(data, custom_markdown_args):
output = ""
if custom_markdown_args == "":
output = markdown.markdown(data) # Using imported python markdown
else:
if ' ' in custom_markdown_args:
			markdown_cmd = custom_markdown_args.split(' ', 1)[0] # Using custom markdown argument passed through command line
markdown_arguments = custom_markdown_args.split(' ', 1)[1] + " "
else:
markdown_cmd = custom_markdown_args
markdown_arguments = " "
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(bytes(data,'UTF-8'))
temp.flush()
print("markdown_cmd : %s"% markdown_cmd, "markdown_args : %s"% markdown_arguments)
			p = Popen([markdown_cmd, markdown_arguments+temp.name], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
temp.close()
os.unlink(temp.name)
if rc != 0:
print (" Invalid markdown script!")
if err != "":
print(" Error while running markdown script : ")
print(err)
sys.exit(1)
else:
output = output.decode('utf-8')
return output
# begin main()
if __name__ == "__main__":
main(sys.argv[1:])
| preDefCSS = {}
preDefCSS["bgimage"] = "background-image"
preDefCSS["bgcolor"] = "background-color"
preDefCSS["bgimage-repeat"] = "background-repeat"
preDefCSS["text-color"] = "color"
preDefCSS["font"] = "font"
preDefCSS["font-size"] = "font-size"
preDefCSS["bgimage-size"] = "background-size"
preDefCSS["left-margin"] = "padding-left"
preDefCSS["top-margin"] = "padding-top"
preDefCSS["bottom-margin"] = "padding-bottom"
preDefCSS["right-margin"] = "padding-right"
return preDefCSS | identifier_body |
easyPresentation.py | # !/usr/bin/python
# This tool converts a text file into an HTML page that looks like a presentation.
# -*- coding: utf-8 -*-
# Version 1.0 05/01/2015
# ***************************************************************************
# * Copyright (C) 2015, Varun Srinivas Chakravarthi Nalluri *
# * *
# * This program is free software; any one can redistribute it and/or *
# * modify it under the terms of the GNU General Public License as *
# * published by the Free Software Foundation; either version 2 of the *
# * License, or (at your option) any later version. * *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
# * *
# * Purpose : This program accepts a text file and few arguments as *
# * input and generates a output folder, which contains a html file and *
# * its supporting files like js, css etc. This html file contains the *
# * presentation in the form of html content, which can be opened with *
# * any browser. *
# * *
# ***************************************************************************
import os
import sys
import getopt
import codecs
import shutil
import errno
import markdown
import tempfile
from subprocess import Popen, PIPE
def main(argv):
template_id = 1
custom_markdown_args = ""
path = "output" + os.path.sep
input_file_name = ""
input_textfile_path = ""
out_file_name = "easyPresentation.html"
argc = len(argv)
# checking if minimum number of arguments required passed
if argc < 1:
		print ("Invalid number of arguments passed\n")
print ("Please use -h or --help for help")
sys.exit(2)
# Reading path of program being run
meta_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.sep
# Reading passed arguments
try:
opts, args = getopt.getopt(argv, 'h:o:t:f:m:d', ["help", "outpath=","template=","filename=","markdown="])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(args) > 0:
input_file_name = args[0]
else:
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt in ('-m',"--markdown"):
# (kp) use consistent space around operators. if '=' has spaces around it, so should '+'
custom_markdown_args = arg
print("Custom markdown call identified...")
elif opt in ('-o',"--outpath"):
path = arg
# reqChangePath(arg)
elif opt in ('-t',"--template"):
template_id = arg
elif opt in ('-f',"--filename"):
out_file_name = arg
else:
		print ('unhandled option %s' % opt)
# checking if non optional arguments are passed.
if input_file_name == "":
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
if not path.endswith(os.path.sep):
path = path + os.path.sep
input_textfile_path = os.path.dirname(os.path.abspath(input_file_name)) + os.path.sep
# opening input txt file
f = open(input_file_name,"r")
# Loading template configuration
templateConfig = load_config(template_id,meta_path)
# reqChangePath(path)
# copying all the required files to output path specified
path = copy_files(meta_path + templateConfig["Template path"], path, templateConfig["Template name"])
if templateConfig == {}:
print ("SYS ERR :: INVALID TEMPLATE ID")
sys.exit(1)
# reading the template file
template = open (meta_path + templateConfig["Template path"] + templateConfig["Template name"],"r")
htmlContent = template.read()
	# This is very important. This particular string should be present in the template; this is where all the generated div blocks need to be placed
htmlDataPart1, htmlDataPart2 = htmlContent.split('--slide content--', 1)
index = htmlDataPart1.find("</head>")
if index == -1:
index = htmlDataPart1.find("<body")
htmlDataPart1 = htmlDataPart1[:index] + " <link rel=\"stylesheet\" type=\"text/css\" href=\"css/slideCustomCSS.css\"> " + htmlDataPart1[index:]
template.close()
data = f.read()
# Formatting the input text and converting it to html
addStyles(data, path, input_textfile_path)
data = data[data.find("~slidestart"):]
data = convertTextToHtml(data, custom_markdown_args)
data = data.replace("~slidestart",templateConfig["current slide"],1)
data = data.replace("~slidestart",templateConfig["all slides"])
data = data.replace("~slideend","</div>")
data = data.replace("\~slidestart","~slidestart")
data = data.replace("\~slideend","~slideend")
output = convertTextToHtml(data, custom_markdown_args)
# writing all the changes to output file
output = htmlDataPart1 + output + htmlDataPart2
output_file = codecs.open(path+out_file_name, "w", encoding="utf-8", errors="xmlcharrefreplace")
output_file.write(output)
# Close the file
f.close()
# Opens a file
def open_file(file_name, mode):
file_content = None
try:
file_content = open(file_name, mode)
except (OSError, IOError) as e:
		print('Error occurred while opening file.\n Error: %s' % e)
return file_content
# print directions to use this tool
def usage():
print ("Usage: python3 easyPresentation.py [OPTIONS] <input_filename>")
print (" Note: All the optional arguments must be given before input text file.\n")
print ("-h, --help \t\t display this help and exit")
print ("-o, --outpath \t\t Copy all output files to specified path, \n\t\t\t if no path is specified by default 'outfolder' will be created in current directory")
	print ("-f, --filename \t\t change primary output file name")
print ("-t, --template \t\t select template")
print ("-m, --markdown \t\t specify custom markdown text to html conversion tool\n")
print ("Sorry for the mess, I'm still under development!!")
# Identifying path-related special characters and resolving the absolute path. I am not sure if this is needed, but still keeping this for later use
def reqChangePath(path):
# (kp) this check is wrong. hidden files on unix filesystems start with '.' so startswith('.') is not a good check for current directory
# (kp) this is actually the same bug that Ken Thompson had in the original unix impl of the filesystem which lead to dotfiles being hidden in the first place
if path.startswith('.') or path.startswith('~'):
p = Popen("pwd", stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
if rc != 0:
print("Invalid cmd")
else:
			path = output.decode().strip() + path[1:]
if not os.path.exists(path):
os.makedirs(path)
# Read configuration of selected template
def load_config(tnum, file_loc):
flag = 0
templateConfig = {}
templet = open_file (file_loc+"config","r")
for line in templet :
if line.strip().startswith("Template id") and flag == 1 :
return templateConfig
if line.strip().startswith("Template id") and flag == 0 :
if int(tnum) == int(line.split(':',1)[1].strip()):
flag = 1
if flag == 1 and line.strip() != "":
key = line.split(':',1)[0].strip()
value = line.split(':',1)[1].strip()
if key == "current slide" or key == "all slides":
value = value[:value.find('>')] + " class=\"customSlideCSS\" " + ">"
# value = value + "<div class=\"customSlideCSS\">"
templateConfig[key] = value
return templateConfig
# Ignore specified files while copying
def ignore_function(ignore):
def _ignore_(path, names):
ignored_names = []
if ignore in names:
ignored_names.append(ignore)
return set(ignored_names)
return _ignore_
# Copy all files from source to destination
def copy_files (src, dest, tname):
try:
# Checking if user selected destination directory is already exists
while os.path.exists(dest):
			print ('destination directory '+dest+' already exists!!')
is_delete = input("Enter 'Y' to replace directory or 'N' to enter new destination path (Y/N): ")
if is_delete.upper() == 'Y':
shutil.rmtree(dest)
else:
dest = input('Enter new destination path to continue : ') # Reading new destination path
shutil.copytree(src, dest, ignore=ignore_function(tname))
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
			shutil.copy(src, dest)
else:
print('Directory not copied. Error: %s' % e)
return dest
# Creating CSS style class. Need to include CSS3 functionalities.
def addStyles(rawText, path, input_file_path):
preDefCSS = initStyles()
if not path.endswith(os.path.sep):
path = path + os.path.sep
if not os.path.exists(path + "css" + os.path.sep ):
os.makedirs(path + "css" + os.path.sep)
customCSS = open_file (path+"css" + os.path.sep + "slideCustomCSS.css",'w')
customCSS.write(".customSlideCSS { ")
styletext = rawText.split('~slidestart',1)[0]
if styletext != "":
styletext = styletext.replace("[","")
styletext = styletext.replace("]",";")
styles = styletext.split(";")
for style in styles:
if style.strip() != "":
key,value = style.split(':',1)
key = key.strip()
value = value.strip()
if key == "bgimage":
# Creating a folder inside css folder and copying back-ground image into that folder.
if not os.path.exists(path + "css" + os.path.sep + "bgimages" + os.path.sep):
						os.makedirs(path + "css" + os.path.sep + "bgimages" + os.path.sep)
shutil.copy(input_file_path+value, path + "css" + os.path.sep + "bgimage")
value = "url(\"bgimage\")"
customCSS.write("{0} : {1};".format(preDefCSS[key],value))
customCSS.write("}")
customCSS.close()
# Initiating basic styles defined for this tool. We can add attribute to below list and use it.
def initStyles():
preDefCSS = {}
preDefCSS["bgimage"] = "background-image"
preDefCSS["bgcolor"] = "background-color"
preDefCSS["bgimage-repeat"] = "background-repeat"
preDefCSS["text-color"] = "color"
preDefCSS["font"] = "font"
preDefCSS["font-size"] = "font-size"
preDefCSS["bgimage-size"] = "background-size"
preDefCSS["left-margin"] = "padding-left"
preDefCSS["top-margin"] = "padding-top"
preDefCSS["bottom-margin"] = "padding-bottom"
preDefCSS["right-margin"] = "padding-right"
return preDefCSS
# Converts markdown text to html text
def convertTextToHtml(data, custom_markdown_args):
output = ""
if custom_markdown_args == "":
output = markdown.markdown(data) # Using imported python markdown
else:
if ' ' in custom_markdown_args:
markdown_cmd = custom_markdown_args.split(' ', 1)[0] # Using the custom markdown command passed through the command line
markdown_arguments = custom_markdown_args.split(' ', 1)[1] + " "
else:
markdown_cmd = custom_markdown_args
markdown_arguments = " "
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(bytes(data,'UTF-8'))
temp.flush()
print("markdown_cmd : %s"% markdown_cmd, "markdown_args : %s"% markdown_arguments)
p = Popen(["markdown", markdown_arguments+temp.name], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
temp.close()
os.unlink(temp.name)
if rc != 0:
print (" Invalid markdown script!")
if err != "": | else:
output = output.decode('utf-8')
return output
# begin main()
if __name__ == "__main__":
main(sys.argv[1:]) | print(" Error while running markdown script : ")
print(err)
sys.exit(1) | random_line_split |
easyPresentation.py | # !/usr/bin/python
# This tool converts a text file into an HTML page that looks like a presentation.
# -*- coding: utf-8 -*-
# Version 1.0 05/01/2015
# ***************************************************************************
# * Copyright (C) 2015, Varun Srinivas Chakravarthi Nalluri *
# * *
# * This program is free software; any one can redistribute it and/or *
# * modify it under the terms of the GNU General Public License as *
# * published by the Free Software Foundation; either version 2 of the *
# * License, or (at your option) any later version. * *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
# * *
# * Purpose : This program accepts a text file and few arguments as *
# * input and generates a output folder, which contains a html file and *
# * its supporting files like js, css etc. This html file contains the *
# * presentation in the form of html content, which can be opened with *
# * any browser. *
# * *
# ***************************************************************************
import os
import sys
import getopt
import codecs
import shutil
import markdown
import tempfile
import errno # used for the ENOTDIR check in copy_files
from subprocess import Popen, PIPE
def main(argv):
template_id = 1
custom_markdown_args = ""
path = "output" + os.path.sep
input_file_name = ""
input_textfile_path = ""
out_file_name = "easyPresentation.html"
argc = len(argv)
# checking if the minimum number of required arguments was passed
if argc < 1:
print ("Inavlid number of arguments passed\n")
print ("Please use -h or --help for help")
sys.exit(2)
# Reading path of program being run
meta_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.sep
# Reading passed arguments
try:
opts, args = getopt.getopt(argv, 'h:o:t:f:m:d', ["help", "outpath=","template=","filename=","markdown="])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(args) > 0:
input_file_name = args[0]
else:
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt in ('-m',"--markdown"):
# (kp) use consistent space around operators. if '=' has spaces around it, so should '+'
custom_markdown_args = arg
print("Custom markdown call identified...")
elif opt in ('-o',"--outpath"):
path = arg
# reqChangePath(arg)
elif opt in ('-t',"--template"):
template_id = arg
elif opt in ('-f',"--filename"):
out_file_name = arg
else:
print ('unhandled option %s' % opt)
# checking if non optional arguments are passed.
if input_file_name == "":
print ('No input text file given in arguments, input text file is mandatory')
print ('use -h or --help for getting help')
sys.exit(2)
if not path.endswith(os.path.sep):
path = path + os.path.sep
input_textfile_path = os.path.dirname(os.path.abspath(input_file_name)) + os.path.sep
# opening input txt file
f = open(input_file_name,"r")
# Loading template configuration
templateConfig = load_config(template_id,meta_path)
# reqChangePath(path)
# bail out early if the template id could not be resolved
if templateConfig == {}:
print ("SYS ERR :: INVALID TEMPLATE ID")
sys.exit(1)
# copying all the required files to output path specified
path = copy_files(meta_path + templateConfig["Template path"], path, templateConfig["Template name"])
# reading the template file
template = open (meta_path + templateConfig["Template path"] + templateConfig["Template name"],"r")
htmlContent = template.read()
# This is very important. This particular string must be present in the template; it marks where all the generated div blocks are placed
htmlDataPart1, htmlDataPart2 = htmlContent.split('--slide content--', 1)
index = htmlDataPart1.find("</head>")
if index == -1:
index = htmlDataPart1.find("<body")
htmlDataPart1 = htmlDataPart1[:index] + " <link rel=\"stylesheet\" type=\"text/css\" href=\"css/slideCustomCSS.css\"> " + htmlDataPart1[index:]
template.close()
data = f.read()
# Formatting the input text and converting it to html
addStyles(data, path, input_textfile_path)
data = data[data.find("~slidestart"):]
data = convertTextToHtml(data, custom_markdown_args)
data = data.replace("~slidestart",templateConfig["current slide"],1)
data = data.replace("~slidestart",templateConfig["all slides"])
data = data.replace("~slideend","</div>")
data = data.replace("\~slidestart","~slidestart")
data = data.replace("\~slideend","~slideend")
output = convertTextToHtml(data, custom_markdown_args)
# writing all the changes to output file
output = htmlDataPart1 + output + htmlDataPart2
output_file = codecs.open(path+out_file_name, "w", encoding="utf-8", errors="xmlcharrefreplace")
output_file.write(output)
# Close the file
f.close()
# Opens a file
def open_file(file_name, mode):
file_content = None
try:
file_content = open(file_name, mode)
except (OSError, IOError) as e:
print('Error occurred while opening file.\n Error: %s' % e)
return file_content
# print directions to use this tool
def usage():
print ("Usage: python3 easyPresentation.py [OPTIONS] <input_filename>")
print (" Note: All the optional arguments must be given before input text file.\n")
print ("-h, --help \t\t display this help and exit")
print ("-o, --outpath \t\t Copy all output files to specified path, \n\t\t\t if no path is specified by default 'outfolder' will be created in current directory")
print ("-f, --filename \t\t change primary out put file name")
print ("-t, --template \t\t select template")
print ("-m, --markdown \t\t specify custom markdown text to html conversion tool\n")
print ("Sorry for the mess, I'm still under development!!")
# Identifying path related special characters and resolving absolute paths. I am not sure if this is needed, but still keeping it for later use
def | (path):
# (kp) this check is wrong. hidden files on unix filesystems start with '.' so startswith('.') is not a good check for current directory
# (kp) this is actually the same bug that Ken Thompson had in the original unix impl of the filesystem which lead to dotfiles being hidden in the first place
if path.startswith('.') or path.startswith('~'):
p = Popen("pwd", stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
if rc != 0:
print("Invalid cmd")
else:
path = output.decode().strip() + path[1:] # output from Popen is bytes; strip the trailing newline before joining
if not os.path.exists(path):
os.makedirs(path)
# Read configuration of selected template
def load_config(tnum, file_loc):
flag = 0
templateConfig = {}
templet = open_file (file_loc+"config","r")
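# flag is set to 1 once the requested template id is found; reading stops at the next 'Template id' line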
for line in templet :
if line.strip().startswith("Template id") and flag == 1 :
return templateConfig
if line.strip().startswith("Template id") and flag == 0 :
if int(tnum) == int(line.split(':',1)[1].strip()):
flag = 1
if flag == 1 and line.strip() != "":
key = line.split(':',1)[0].strip()
value = line.split(':',1)[1].strip()
if key == "current slide" or key == "all slides":
value = value[:value.find('>')] + " class=\"customSlideCSS\" " + ">"
# value = value + "<div class=\"customSlideCSS\">"
templateConfig[key] = value
return templateConfig
# Ignore specified files while copying
def ignore_function(ignore):
def _ignore_(path, names):
ignored_names = []
if ignore in names:
ignored_names.append(ignore)
return set(ignored_names)
return _ignore_
# Copy all files from source to destination
def copy_files (src, dest, tname):
try:
# Checking if user selected destination directory is already exists
while os.path.exists(dest):
print ('destination directory '+dest+' already exists!!')
is_delete = input("Enter 'Y' to replace directory or 'N' to enter new destination path (Y/N): ")
if is_delete.upper() == 'Y':
shutil.rmtree(dest)
else:
dest = input('Enter new destination path to continue : ') # Reading new destination path
shutil.copytree(src, dest, ignore=ignore_function(tname))
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else:
print('Directory not copied. Error: %s' % e)
return dest
# Creating CSS style class. Need to include CSS3 functionalities.
def addStyles(rawText, path, input_file_path):
preDefCSS = initStyles()
if not path.endswith(os.path.sep):
path = path + os.path.sep
if not os.path.exists(path + "css" + os.path.sep ):
os.makedirs(path + "css" + os.path.sep)
customCSS = open_file (path+"css" + os.path.sep + "slideCustomCSS.css",'w')
customCSS.write(".customSlideCSS { ")
styletext = rawText.split('~slidestart',1)[0]
if styletext != "":
styletext = styletext.replace("[","")
styletext = styletext.replace("]",";")
styles = styletext.split(";")
for style in styles:
if style.strip() != "":
key,value = style.split(':',1)
key = key.strip()
value = value.strip()
if key == "bgimage":
# Creating a folder inside css folder and copying back-ground image into that folder.
if not os.path.exists(path + "css" + os.path.sep + "bgimages" + os.path.sep):
os.makedirs(path+"css" + os.path.sep + " bgimages" + os.path.sep)
shutil.copy(input_file_path+value, path + "css" + os.path.sep + "bgimage")
value = "url(\"bgimage\")"
customCSS.write("{0} : {1};".format(preDefCSS[key],value))
customCSS.write("}")
customCSS.close()
# Initiating basic styles defined for this tool. We can add attribute to below list and use it.
def initStyles():
preDefCSS = {}
preDefCSS["bgimage"] = "background-image"
preDefCSS["bgcolor"] = "background-color"
preDefCSS["bgimage-repeat"] = "background-repeat"
preDefCSS["text-color"] = "color"
preDefCSS["font"] = "font"
preDefCSS["font-size"] = "font-size"
preDefCSS["bgimage-size"] = "background-size"
preDefCSS["left-margin"] = "padding-left"
preDefCSS["top-margin"] = "padding-top"
preDefCSS["bottom-margin"] = "padding-bottom"
preDefCSS["right-margin"] = "padding-right"
return preDefCSS
# Converts markdown text to html text
def convertTextToHtml(data, custom_markdown_args):
output = ""
if custom_markdown_args == "":
output = markdown.markdown(data) # Using imported python markdown
else:
if ' ' in custom_markdown_args:
markdown_cmd = custom_markdown_args.split(' ', 1)[0] # Using the custom markdown command passed through the command line
markdown_arguments = custom_markdown_args.split(' ', 1)[1] + " "
else:
markdown_cmd = custom_markdown_args
markdown_arguments = " "
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(bytes(data,'UTF-8'))
temp.flush()
print("markdown_cmd : %s"% markdown_cmd, "markdown_args : %s"% markdown_arguments)
p = Popen(["markdown", markdown_arguments+temp.name], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
temp.close()
os.unlink(temp.name)
if rc != 0:
print (" Invalid markdown script!")
if err != "":
print(" Error while running markdown script : ")
print(err)
sys.exit(1)
else:
output = output.decode('utf-8')
return output
# begin main()
if __name__ == "__main__":
main(sys.argv[1:])
| reqChangePath | identifier_name |
osutil.py | # -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OS Specific Utilities
=====================
Certain utilities to make life easier.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import datetime as _datetime
import errno as _errno
import fcntl as _fcntl
import os as _os
import resource as _resource
import socket as _socket
import sys as _sys
import threading as _threading
import warnings as _warnings
from wtf import Error, WtfWarning
class IdentityWarning(WtfWarning):
""" The attempt to change identity caused a soft error """
class IdentityError(Error):
""" The attempt to change identity caused a hard error """
class SocketError(Error):
""" Socket error """
class AddressError(SocketError):
""" Address resolution error """
class TimeoutError(SocketError):
|
class SSLError(SocketError):
""" SSL error """
def raise_socket_error(timeout=None):
"""
Convert a socket error into an appropriate module exception
This function needs an already raised ``socket.error``.
``raise_socket_error.EAIS`` is a mapping from GAI error numbers to their
names (``{int: 'name', ...}``)
:Parameters:
- `timeout`: applied timeout in seconds, used for the TimeoutError
description
:Types:
- `timeout`: ``float``
:Exceptions:
- `TimeoutError`: ``socket.timeout``
- `AddressError`: address/host resolution error
(``socket.gaierror/herror``)
- `SSLError`: ``socket.sslerror``
- `SocketError`: other socket errors, ``IOError``
- `Exception`: unrecognized exceptions
"""
try:
raise
except _socket.timeout:
if timeout is not None:
raise TimeoutError, "Timed out after %s seconds" % timeout, \
_sys.exc_info()[2]
raise TimeoutError, "Timed out", _sys.exc_info()[2]
except _socket.gaierror, e:
# pylint: disable = E1101
raise AddressError, "Address Information Error: %s (%s)" % \
(raise_socket_error.EAIS.get(e[0], e[0]), e[1]), \
_sys.exc_info()[2]
except _socket.herror, e:
raise AddressError, "Host Resolution Error %s: %s" % \
(e[0], e[1]), _sys.exc_info()[2]
except _socket.sslerror, e:
raise SSLError, "Socket SSL Error: %s" % str(e), _sys.exc_info()[2]
except _socket.error, e:
if len(e.args) == 1:
raise SocketError, "Socket Error: %s" % \
(e[0],), _sys.exc_info()[2]
else:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), e[1]), _sys.exc_info()[2]
except IOError, e:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), str(e)), \
_sys.exc_info()[2]
if 1:
raise_socket_error.EAIS = dict((val, var) # pylint: disable = W0612
for var, val in vars(_socket).items() if var.startswith('EAI_')
)
def unlink_silent(filename):
"""
Unlink a filename, but ignore if it does not exist
:Parameters:
- `filename`: The filename to remove
:Types:
- `filename`: ``basestring``
"""
try:
_os.unlink(filename)
except OSError, e:
if e.errno != _errno.ENOENT:
raise
def close_on_exec(descriptor, close=True):
"""
Mark `descriptor` to be closed on exec (or not)
:Warning: This function is not thread safe (race condition)
:Parameters:
- `descriptor`: An object with ``fileno`` method or an ``int``
representing a low level file descriptor
- `close`: Mark being closed on exec?
:Types:
- `descriptor`: ``file`` or ``int``
- `close`: ``bool``
:Exceptions:
- `IOError`: Something went wrong
"""
try:
fileno = descriptor.fileno
except AttributeError:
fd = descriptor
else:
fd = fileno()
old = _fcntl.fcntl(fd, _fcntl.F_GETFD)
if close:
new = old | _fcntl.FD_CLOEXEC
else:
new = old & ~_fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, new)
def safe_fd(fd):
"""
Ensure that file descriptor fd is >= 3
This is done by dup(2) calls until it's greater than 2. After success
the duped descriptors are closed.
:Parameters:
- `fd`: The file descriptor to process
:Types:
- `fd`: ``int``
:return: The new file descriptor (>=3)
:rtype: ``int``
:Exceptions:
- `OSError`: Duping went wrong
"""
toclose = []
try:
while fd < 3:
toclose.append(fd)
fd = _os.dup(fd)
finally:
for dfd in toclose:
try:
_os.close(dfd)
except OSError:
pass
return fd
def close_descriptors(*keep):
""" Close all file descriptors >= 3 """
keep = set(keep)
try:
flag = _resource.RLIMIT_NOFILE
except AttributeError:
try:
flag = _resource.RLIMIT_OFILE
except AttributeError:
flag = None
if flag is not None:
try:
maxfiles = _resource.getrlimit(flag)[0]
except (_resource.error, ValueError):
flag = None
if flag is None:
maxfiles = 256 # wild guess
for fd in xrange(3, maxfiles + 1):
if fd in keep:
continue
try:
_os.close(fd)
except OSError:
pass
try:
_myflag = _socket.TCP_NODELAY
except AttributeError:
def disable_nagle(sock, peername=None):
"""
Disable nagle algorithm for a TCP socket
:Note: This function is a NOOP on this platform (not implemented).
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
return sock, peername
else:
def disable_nagle(sock, peername=None, _flag=_myflag):
"""
Disable nagle algorithm for a TCP socket
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
if not isinstance(peername, str):
try:
sock.setsockopt(_socket.IPPROTO_TCP, _flag, 1)
except _socket.error:
pass # would have been nice, but, well, not that critical
return sock, peername
_connect_cache = {}
_connect_cache_lock = _threading.Lock()
def connect(spec, timeout=None, nagle_off=True, cache=0,
_cache=_connect_cache, _lock=_connect_cache_lock):
"""
Create and connect a socket to a peer
:Parameters:
- `spec`: The peer specification (``(host, port)`` or ``str``)
- `timeout`: Timeout in seconds
- `nagle_off`: Disable Nagle's algorithm. This option does not
apply to UNIX domain sockets.
:Types:
- `spec`: ``tuple`` or ``str``
- `timeout`: ``float``
- `nagle_off`: ``bool``
:return: The connected socket or ``None`` if no connectable address
could be found
:rtype: ``socket.socket``
:Exceptions:
- `SocketError`: socket error (maybe a subclass of `SocketError`)
- `NotImplementedError`: UNIX domain sockets are not supported in this
platform
"""
# pylint: disable = W0102, R0912, R0915
sock = None
try:
adi = None
if cache > 0:
_lock.acquire()
try:
if spec in _cache:
adi, stamp = _cache[spec]
if stamp < _datetime.datetime.utcnow():
del _cache[spec]
adi = None
finally:
_lock.release()
if adi is None:
if isinstance(spec, str):
try:
AF_UNIX = _socket.AF_UNIX
except AttributeError:
raise NotImplementedError(
"UNIX domain sockets are not supported"
)
adi = [(AF_UNIX, _socket.SOCK_STREAM, 0, None, spec)]
else:
adi = _socket.getaddrinfo(spec[0], spec[1],
_socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, 0)
if cache > 0:
_lock.acquire()
try:
if spec not in _cache:
_cache[spec] = (
adi,
_datetime.datetime.utcnow()
+ _datetime.timedelta(seconds=cache),
)
finally:
_lock.release()
AF_INET6 = getattr(_socket, 'AF_INET6', None)
for family, stype, proto, _, addr in adi:
if not _socket.has_ipv6 and family == AF_INET6:
continue # skip silently if Python was built without it.
sock = _socket.socket(family, stype, proto)
sock.settimeout(timeout)
retry = True
while retry:
try:
sock.connect(addr)
except _socket.timeout:
break
except _socket.error, e:
if e[0] == _errno.EINTR:
continue
elif e[0] in (_errno.ENETUNREACH, _errno.ECONNREFUSED):
break
raise
retry = False
else:
if nagle_off:
disable_nagle(sock)
return sock
sock.close()
except (_socket.error, IOError):
try:
raise_socket_error(timeout=timeout)
except SocketError:
e = _sys.exc_info()
try:
if sock is not None:
sock.close()
finally:
try:
raise e[0], e[1], e[2]
finally:
del e
return None
del _connect_cache, _connect_cache_lock
def change_identity(user, group):
"""
Change identity of the current process
This only works if the effective user ID of the current process is 0.
:Parameters:
- `user`: User identification, if it is interpretable as ``int``, it's
assumed to be a numeric user ID
- `group`: Group identification, if it is interpretable as ``int``, it's
assumed to be a numeric group ID
:Types:
- `user`: ``str``
- `group`: ``str``
:Exceptions:
- `IdentityWarning`: A soft error occurred (like not being root)
"""
if _os.geteuid() != 0:
_warnings.warn("Not attempting to change identity (not root)",
category=IdentityWarning)
return
user, group = str(user), str(group)
# resolve user
import pwd
try:
try:
userid = int(user)
except (TypeError, ValueError):
userid = pwd.getpwnam(user).pw_uid
else:
user = pwd.getpwuid(userid).pw_name
except KeyError, e:
raise IdentityError(
"User resolution problem of %r: %s" % (user, str(e))
)
# resolve group
import grp
try:
try:
groupid = int(group)
except (TypeError, ValueError):
groupid = grp.getgrnam(group).gr_gid
else:
group = grp.getgrgid(groupid).gr_name
except KeyError, e:
raise IdentityError(
"Group resolution problem of %r: %s" % (group, str(e))
)
# now do change our identity; group first as we might not have the
# permissions to do so after we left the power of root behind us.
_os.setgid(groupid)
try:
initgroups(user, groupid)
except NotImplementedError:
_warnings.warn("initgroups(3) is not implemented. You have to run "
"without supplemental groups or compile the wtf package "
"properly.", category=IdentityWarning)
_os.setuid(userid)
def initgroups(username, gid):
"""
Implement initgroups(3)
:Parameters:
- `username`: The user name
- `gid`: The group id
:Types:
- `username`: ``str``
- `gid`: ``int``
:Exceptions:
- `OSError`: initgroups() didn't succeed
- `NotImplementedError`: initgroups is not implemented
(needs c-extension)
"""
# pylint: disable = W0613
raise NotImplementedError()
from wtf import c_override
cimpl = c_override('_wtf_cutil')
if cimpl is not None:
# pylint: disable = E1103
initgroups = cimpl.initgroups
del c_override, cimpl
| """ Timeout error """ | identifier_body |
osutil.py | # -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OS Specific Utilities
=====================
Certain utilities to make life easier.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import datetime as _datetime
import errno as _errno
import fcntl as _fcntl
import os as _os
import resource as _resource
import socket as _socket
import sys as _sys
import threading as _threading
import warnings as _warnings
from wtf import Error, WtfWarning
class IdentityWarning(WtfWarning):
""" The attempt to change identity caused a soft error """
class IdentityError(Error):
""" The attempt to change identity caused a hard error """
class SocketError(Error):
""" Socket error """
class AddressError(SocketError):
""" Address resolution error """
class TimeoutError(SocketError):
""" Timeout error """
class SSLError(SocketError):
""" SSL error """
def raise_socket_error(timeout=None):
"""
Convert a socket error into an appropriate module exception
This function needs an already raised ``socket.error``.
``raise_socket_error.EAIS`` is a mapping from GAI error numbers to their
names (``{int: 'name', ...}``)
:Parameters:
- `timeout`: applied timeout in seconds, used for the TimeoutError
description
:Types:
- `timeout`: ``float``
:Exceptions:
- `TimeoutError`: ``socket.timeout``
- `AddressError`: address/host resolution error
(``socket.gaierror/herror``)
- `SSLError`: ``socket.sslerror``
- `SocketError`: other socket errors, ``IOError``
- `Exception`: unrecognized exceptions
"""
try:
raise
except _socket.timeout:
if timeout is not None:
raise TimeoutError, "Timed out after %s seconds" % timeout, \
_sys.exc_info()[2]
raise TimeoutError, "Timed out", _sys.exc_info()[2]
except _socket.gaierror, e:
# pylint: disable = E1101
raise AddressError, "Address Information Error: %s (%s)" % \
(raise_socket_error.EAIS.get(e[0], e[0]), e[1]), \
_sys.exc_info()[2]
except _socket.herror, e:
raise AddressError, "Host Resolution Error %s: %s" % \
(e[0], e[1]), _sys.exc_info()[2]
except _socket.sslerror, e:
raise SSLError, "Socket SSL Error: %s" % str(e), _sys.exc_info()[2]
except _socket.error, e:
if len(e.args) == 1:
raise SocketError, "Socket Error: %s" % \
(e[0],), _sys.exc_info()[2]
else:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), e[1]), _sys.exc_info()[2]
except IOError, e:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), str(e)), \
_sys.exc_info()[2]
if 1:
raise_socket_error.EAIS = dict((val, var) # pylint: disable = W0612
for var, val in vars(_socket).items() if var.startswith('EAI_')
)
def unlink_silent(filename):
"""
Unlink a filename, but ignore if it does not exist
:Parameters:
- `filename`: The filename to remove
:Types:
- `filename`: ``basestring``
"""
try:
_os.unlink(filename)
except OSError, e:
if e.errno != _errno.ENOENT:
raise
def close_on_exec(descriptor, close=True):
"""
Mark `descriptor` to be closed on exec (or not)
:Warning: This function is not thread safe (race condition)
:Parameters:
- `descriptor`: An object with ``fileno`` method or an ``int``
representing a low level file descriptor
- `close`: Mark being closed on exec?
:Types:
- `descriptor`: ``file`` or ``int``
- `close`: ``bool``
:Exceptions:
- `IOError`: Something went wrong
"""
try:
fileno = descriptor.fileno
except AttributeError:
fd = descriptor
else:
fd = fileno()
old = _fcntl.fcntl(fd, _fcntl.F_GETFD)
if close:
new = old | _fcntl.FD_CLOEXEC
else:
new = old & ~_fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, new)
def safe_fd(fd):
"""
Ensure that file descriptor fd is >= 3
This is done by dup(2) calls until it's greater than 2. After success
the duped descriptors are closed.
:Parameters:
- `fd`: The file descriptor to process
:Types:
- `fd`: ``int``
:return: The new file descriptor (>=3)
:rtype: ``int``
:Exceptions:
- `OSError`: Duping went wrong
"""
toclose = []
try:
while fd < 3:
toclose.append(fd)
fd = _os.dup(fd)
finally:
for dfd in toclose:
try:
_os.close(dfd)
except OSError:
pass
return fd
def close_descriptors(*keep):
""" Close all file descriptors >= 3 """
keep = set(keep)
try:
flag = _resource.RLIMIT_NOFILE
except AttributeError:
try:
flag = _resource.RLIMIT_OFILE
except AttributeError:
flag = None
if flag is not None:
try:
maxfiles = _resource.getrlimit(flag)[0]
except (_resource.error, ValueError):
flag = None
if flag is None:
maxfiles = 256 # wild guess
for fd in xrange(3, maxfiles + 1):
if fd in keep:
continue
try:
_os.close(fd)
except OSError:
pass
try:
_myflag = _socket.TCP_NODELAY
except AttributeError:
def disable_nagle(sock, peername=None):
"""
Disable nagle algorithm for a TCP socket
:Note: This function is a NOOP on this platform (not implemented).
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
return sock, peername
else:
def disable_nagle(sock, peername=None, _flag=_myflag):
"""
Disable nagle algorithm for a TCP socket
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
if not isinstance(peername, str):
try:
sock.setsockopt(_socket.IPPROTO_TCP, _flag, 1)
except _socket.error:
pass # would have been nice, but, well, not that critical
return sock, peername
_connect_cache = {}
_connect_cache_lock = _threading.Lock()
def connect(spec, timeout=None, nagle_off=True, cache=0,
_cache=_connect_cache, _lock=_connect_cache_lock):
"""
Create and connect a socket to a peer
:Parameters:
- `spec`: The peer specification (``(host, port)`` or ``str``)
- `timeout`: Timeout in seconds
- `nagle_off`: Disable Nagle's algorithm. This option does not
apply to UNIX domain sockets.
:Types:
- `spec`: ``tuple`` or ``str``
- `timeout`: ``float``
- `nagle_off`: ``bool``
:return: The connected socket or ``None`` if no connectable address
could be found
:rtype: ``socket.socket``
:Exceptions:
- `SocketError`: socket error (maybe a subclass of `SocketError`)
- `NotImplementedError`: UNIX domain sockets are not supported in this
platform
"""
# pylint: disable = W0102, R0912, R0915
sock = None
try:
adi = None
if cache > 0:
_lock.acquire()
try:
if spec in _cache:
adi, stamp = _cache[spec]
if stamp < _datetime.datetime.utcnow():
del _cache[spec]
adi = None
finally:
_lock.release()
if adi is None:
if isinstance(spec, str):
try:
AF_UNIX = _socket.AF_UNIX
except AttributeError:
raise NotImplementedError(
"UNIX domain sockets are not supported"
)
adi = [(AF_UNIX, _socket.SOCK_STREAM, 0, None, spec)]
else:
adi = _socket.getaddrinfo(spec[0], spec[1],
_socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, 0)
if cache > 0:
|
AF_INET6 = getattr(_socket, 'AF_INET6', None)
for family, stype, proto, _, addr in adi:
if not _socket.has_ipv6 and family == AF_INET6:
continue # skip silently if Python was built without it.
sock = _socket.socket(family, stype, proto)
sock.settimeout(timeout)
retry = True
while retry:
try:
sock.connect(addr)
except _socket.timeout:
break
except _socket.error, e:
if e[0] == _errno.EINTR:
continue
elif e[0] in (_errno.ENETUNREACH, _errno.ECONNREFUSED):
break
raise
retry = False
else:
if nagle_off:
disable_nagle(sock)
return sock
sock.close()
except (_socket.error, IOError):
try:
raise_socket_error(timeout=timeout)
except SocketError:
e = _sys.exc_info()
try:
if sock is not None:
sock.close()
finally:
try:
raise e[0], e[1], e[2]
finally:
del e
return None
del _connect_cache, _connect_cache_lock
def change_identity(user, group):
"""
Change identity of the current process
This only works if the effective user ID of the current process is 0.
:Parameters:
- `user`: User identification, if it is interpretable as ``int``, it's
assumed to be a numeric user ID
- `group`: Group identification, if it is interpretable as ``int``, it's
assumed to be a numeric group ID
:Types:
- `user`: ``str``
- `group`: ``str``
:Exceptions:
- `IdentityWarning`: A soft error occurred (like not being root)
"""
if _os.geteuid() != 0:
_warnings.warn("Not attempting to change identity (not root)",
category=IdentityWarning)
return
user, group = str(user), str(group)
# resolve user
import pwd
try:
try:
userid = int(user)
except (TypeError, ValueError):
userid = pwd.getpwnam(user).pw_uid
else:
user = pwd.getpwuid(userid).pw_name
except KeyError, e:
raise IdentityError(
"User resolution problem of %r: %s" % (user, str(e))
)
# resolve group
import grp
try:
try:
groupid = int(group)
except (TypeError, ValueError):
groupid = grp.getgrnam(group).gr_gid
else:
group = grp.getgrgid(groupid).gr_name
except KeyError, e:
raise IdentityError(
"Group resolution problem of %r: %s" % (group, str(e))
)
# now do change our identity; group first as we might not have the
# permissions to do so after we left the power of root behind us.
_os.setgid(groupid)
try:
initgroups(user, groupid)
except NotImplementedError:
_warnings.warn("initgroups(3) is not implemented. You have to run "
"without supplemental groups or compile the wtf package "
"properly.", category=IdentityWarning)
_os.setuid(userid)
def initgroups(username, gid):
"""
Implement initgroups(3)
:Parameters:
- `username`: The user name
- `gid`: The group id
:Types:
- `username`: ``str``
- `gid`: ``int``
:Exceptions:
- `OSError`: initgroups() didn't succeed
- `NotImplementedError`: initgroups is not implemented
(needs c-extension)
"""
# pylint: disable = W0613
raise NotImplementedError()
from wtf import c_override
cimpl = c_override('_wtf_cutil')
if cimpl is not None:
# pylint: disable = E1103
initgroups = cimpl.initgroups
del c_override, cimpl
| _lock.acquire()
try:
if spec not in _cache:
_cache[spec] = (
adi,
_datetime.datetime.utcnow()
+ _datetime.timedelta(seconds=cache),
)
finally:
_lock.release() | conditional_block |
osutil.py | # -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OS Specific Utilities
=====================
Certain utilities to make life easier.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import datetime as _datetime
import errno as _errno
import fcntl as _fcntl
import os as _os
import resource as _resource
import socket as _socket
import sys as _sys
import threading as _threading
import warnings as _warnings
from wtf import Error, WtfWarning
class IdentityWarning(WtfWarning):
""" The attempt to change identity caused a soft error """
class IdentityError(Error):
""" The attempt to change identity caused a hard error """
class SocketError(Error):
""" Socket error """
class AddressError(SocketError):
""" Address resolution error """
class TimeoutError(SocketError):
""" Timeout error """
class SSLError(SocketError):
""" SSL error """
def raise_socket_error(timeout=None):
"""
Convert a socket error into an appropriate module exception
This function needs an already raised ``socket.error``.
``raise_socket_error.EAIS`` is a mapping from GAI error numbers to their
names (``{int: 'name', ...}``)
:Parameters:
- `timeout`: applied timeout in seconds, used for the TimeoutError
description
:Types:
- `timeout`: ``float``
:Exceptions:
- `TimeoutError`: ``socket.timeout``
- `AddressError`: address/host resolution error
(``socket.gaierror/herror``)
- `SSLError`: ``socket.sslerror``
- `SocketError`: other socket errors, ``IOError``
- `Exception`: unrecognized exceptions
"""
try:
raise
except _socket.timeout:
if timeout is not None:
raise TimeoutError, "Timed out after %s seconds" % timeout, \
_sys.exc_info()[2]
raise TimeoutError, "Timed out", _sys.exc_info()[2]
except _socket.gaierror, e:
# pylint: disable = E1101
raise AddressError, "Address Information Error: %s (%s)" % \
(raise_socket_error.EAIS.get(e[0], e[0]), e[1]), \
_sys.exc_info()[2]
except _socket.herror, e:
raise AddressError, "Host Resolution Error %s: %s" % \
(e[0], e[1]), _sys.exc_info()[2]
except _socket.sslerror, e:
raise SSLError, "Socket SSL Error: %s" % str(e), _sys.exc_info()[2]
except _socket.error, e:
if len(e.args) == 1:
raise SocketError, "Socket Error: %s" % \
(e[0],), _sys.exc_info()[2]
else:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), e[1]), _sys.exc_info()[2]
except IOError, e:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), str(e)), \
_sys.exc_info()[2]
if 1:
raise_socket_error.EAIS = dict((val, var) # pylint: disable = W0612
for var, val in vars(_socket).items() if var.startswith('EAI_')
)
def unlink_silent(filename):
"""
Unlink a filename, but ignore if it does not exist
:Parameters:
- `filename`: The filename to remove
:Types:
- `filename`: ``basestring``
"""
try:
_os.unlink(filename)
except OSError, e:
if e.errno != _errno.ENOENT:
raise
def close_on_exec(descriptor, close=True):
"""
Mark `descriptor` to be closed on exec (or not)
:Warning: This function is not thread safe (race condition)
:Parameters:
- `descriptor`: An object with ``fileno`` method or an ``int``
representing a low level file descriptor
- `close`: Mark being closed on exec?
:Types:
- `descriptor`: ``file`` or ``int``
- `close`: ``bool``
:Exceptions:
- `IOError`: Something went wrong
"""
try:
fileno = descriptor.fileno
except AttributeError:
fd = descriptor
else:
fd = fileno()
old = _fcntl.fcntl(fd, _fcntl.F_GETFD)
if close:
new = old | _fcntl.FD_CLOEXEC
else:
new = old & ~_fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, new)
def safe_fd(fd):
"""
Ensure that file descriptor fd is >= 3
This is done by dup(2) calls until it's greater than 2. After success
the duped descriptors are closed.
:Parameters:
- `fd`: The file descriptor to process
:Types:
- `fd`: ``int``
:return: The new file descriptor (>=3)
:rtype: ``int``
:Exceptions:
- `OSError`: Duping went wrong
"""
toclose = [] | finally:
for dfd in toclose:
try:
_os.close(dfd)
except OSError:
pass
return fd
def close_descriptors(*keep):
""" Close all file descriptors >= 3 """
keep = set(keep)
try:
flag = _resource.RLIMIT_NOFILE
except AttributeError:
try:
flag = _resource.RLIMIT_OFILE
except AttributeError:
flag = None
if flag is not None:
try:
maxfiles = _resource.getrlimit(flag)[0]
except (_resource.error, ValueError):
flag = None
if flag is None:
maxfiles = 256 # wild guess
for fd in xrange(3, maxfiles + 1):
if fd in keep:
continue
try:
_os.close(fd)
except OSError:
pass
try:
_myflag = _socket.TCP_NODELAY
except AttributeError:
def disable_nagle(sock, peername=None):
"""
Disable nagle algorithm for a TCP socket
:Note: This function is a NOOP on this platform (not implemented).
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
return sock, peername
else:
def disable_nagle(sock, peername=None, _flag=_myflag):
"""
Disable nagle algorithm for a TCP socket
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
if not isinstance(peername, str):
try:
sock.setsockopt(_socket.IPPROTO_TCP, _flag, 1)
except _socket.error:
pass # would have been nice, but, well, not that critical
return sock, peername
_connect_cache = {}
_connect_cache_lock = _threading.Lock()
def connect(spec, timeout=None, nagle_off=True, cache=0,
_cache=_connect_cache, _lock=_connect_cache_lock):
"""
Create and connect a socket to a peer
:Parameters:
- `spec`: The peer specification (``(host, port)`` or ``str``)
- `timeout`: Timeout in seconds
- `nagle_off`: Disable Nagle's algorithm. This option does not
apply to UNIX domain sockets.
:Types:
- `spec`: ``tuple`` or ``str``
- `timeout`: ``float``
- `nagle_off`: ``bool``
:return: The connected socket or ``None`` if no connectable address
could be found
:rtype: ``socket.socket``
:Exceptions:
- `SocketError`: socket error (maybe a subclass of `SocketError`)
- `NotImplementedError`: UNIX domain sockets are not supported in this
platform
"""
# pylint: disable = W0102, R0912, R0915
sock = None
try:
adi = None
if cache > 0:
_lock.acquire()
try:
if spec in _cache:
adi, stamp = _cache[spec]
if stamp < _datetime.datetime.utcnow():
del _cache[spec]
adi = None
finally:
_lock.release()
if adi is None:
if isinstance(spec, str):
try:
AF_UNIX = _socket.AF_UNIX
except AttributeError:
raise NotImplementedError(
"UNIX domain sockets are not supported"
)
adi = [(AF_UNIX, _socket.SOCK_STREAM, 0, None, spec)]
else:
adi = _socket.getaddrinfo(spec[0], spec[1],
_socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, 0)
if cache > 0:
_lock.acquire()
try:
if spec not in _cache:
_cache[spec] = (
adi,
_datetime.datetime.utcnow()
+ _datetime.timedelta(seconds=cache),
)
finally:
_lock.release()
AF_INET6 = getattr(_socket, 'AF_INET6', None)
for family, stype, proto, _, addr in adi:
if not _socket.has_ipv6 and family == AF_INET6:
continue # skip silently if Python was built without it.
sock = _socket.socket(family, stype, proto)
sock.settimeout(timeout)
retry = True
while retry:
try:
sock.connect(addr)
except _socket.timeout:
break
except _socket.error, e:
if e[0] == _errno.EINTR:
continue
elif e[0] in (_errno.ENETUNREACH, _errno.ECONNREFUSED):
break
raise
retry = False
else:
if nagle_off:
disable_nagle(sock)
return sock
sock.close()
except (_socket.error, IOError):
try:
raise_socket_error(timeout=timeout)
except SocketError:
e = _sys.exc_info()
try:
if sock is not None:
sock.close()
finally:
try:
raise e[0], e[1], e[2]
finally:
del e
return None
del _connect_cache, _connect_cache_lock
def change_identity(user, group):
"""
Change identity of the current process
This only works if the effective user ID of the current process is 0.
:Parameters:
- `user`: User identification, if it is interpretable as ``int``, it's
assumed to be a numeric user ID
- `group`: Group identification, if it is interpretable as ``int``, it's
assumed to be a numeric group ID
:Types:
- `user`: ``str``
- `group`: ``str``
:Exceptions:
- `IdentityWarning`: A soft error occurred (like not being root)
"""
if _os.geteuid() != 0:
_warnings.warn("Not attempting to change identity (not root)",
category=IdentityWarning)
return
user, group = str(user), str(group)
# resolve user
import pwd
try:
try:
userid = int(user)
except (TypeError, ValueError):
userid = pwd.getpwnam(user).pw_uid
else:
user = pwd.getpwuid(userid).pw_name
except KeyError, e:
raise IdentityError(
"User resolution problem of %r: %s" % (user, str(e))
)
# resolve group
import grp
try:
try:
groupid = int(group)
except (TypeError, ValueError):
groupid = grp.getgrnam(group).gr_gid
else:
group = grp.getgrgid(groupid).gr_name
except KeyError, e:
raise IdentityError(
"Group resolution problem of %r: %s" % (group, str(e))
)
# now do change our identity; group first as we might not have the
# permissions to do so after we left the power of root behind us.
_os.setgid(groupid)
try:
initgroups(user, groupid)
except NotImplementedError:
_warnings.warn("initgroups(3) is not implemented. You have to run "
"without supplemental groups or compile the wtf package "
"properly.", category=IdentityWarning)
_os.setuid(userid)
def initgroups(username, gid):
"""
Implement initgroups(3)
:Parameters:
- `username`: The user name
- `gid`: The group id
:Types:
- `username`: ``str``
- `gid`: ``int``
:Exceptions:
- `OSError`: initgroups() didn't succeed
- `NotImplementedError`: initgroups is not implemented
(needs c-extension)
"""
# pylint: disable = W0613
raise NotImplementedError()
from wtf import c_override
cimpl = c_override('_wtf_cutil')
if cimpl is not None:
# pylint: disable = E1103
initgroups = cimpl.initgroups
del c_override, cimpl | try:
while fd < 3:
toclose.append(fd)
fd = _os.dup(fd) | random_line_split |
osutil.py | # -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OS Specific Utilities
=====================
Certain utilities to make life easier.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import datetime as _datetime
import errno as _errno
import fcntl as _fcntl
import os as _os
import resource as _resource
import socket as _socket
import sys as _sys
import threading as _threading
import warnings as _warnings
from wtf import Error, WtfWarning
class IdentityWarning(WtfWarning):
""" The attempt to change identity caused a soft error """
class | (Error):
""" The attempt to change identity caused a hard error """
class SocketError(Error):
""" Socket error """
class AddressError(SocketError):
""" Address resolution error """
class TimeoutError(SocketError):
""" Timeout error """
class SSLError(SocketError):
""" SSL error """
def raise_socket_error(timeout=None):
"""
Convert a socket error into an appropriate module exception
This function needs an already raised ``socket.error``.
``raise_socket_error.EAIS`` is a mapping from GAI error numbers to their
names (``{int: 'name', ...}``)
:Parameters:
- `timeout`: applied timeout in seconds, used for the TimeoutError
description
:Types:
- `timeout`: ``float``
:Exceptions:
- `TimeoutError`: ``socket.timeout``
- `AddressError`: address/host resolution error
(``socket.gaierror/herror``)
- `SSLError`: ``socket.sslerror``
- `SocketError`: other socket errors, ``IOError``
- `Exception`: unrecognized exceptions
"""
try:
raise
except _socket.timeout:
if timeout is not None:
raise TimeoutError, "Timed out after %s seconds" % timeout, \
_sys.exc_info()[2]
raise TimeoutError, "Timed out", _sys.exc_info()[2]
except _socket.gaierror, e:
# pylint: disable = E1101
raise AddressError, "Address Information Error: %s (%s)" % \
(raise_socket_error.EAIS.get(e[0], e[0]), e[1]), \
_sys.exc_info()[2]
except _socket.herror, e:
raise AddressError, "Host Resolution Error %s: %s" % \
(e[0], e[1]), _sys.exc_info()[2]
except _socket.sslerror, e:
raise SSLError, "Socket SSL Error: %s" % str(e), _sys.exc_info()[2]
except _socket.error, e:
if len(e.args) == 1:
raise SocketError, "Socket Error: %s" % \
(e[0],), _sys.exc_info()[2]
else:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), e[1]), _sys.exc_info()[2]
except IOError, e:
raise SocketError, "Socket Error %s: %s" % \
(_errno.errorcode.get(e[0], e[0]), str(e)), \
_sys.exc_info()[2]
if 1:
raise_socket_error.EAIS = dict((val, var) # pylint: disable = W0612
for var, val in vars(_socket).items() if var.startswith('EAI_')
)
def unlink_silent(filename):
"""
Unlink a filename, but ignore if it does not exist
:Parameters:
- `filename`: The filename to remove
:Types:
- `filename`: ``basestring``
"""
try:
_os.unlink(filename)
except OSError, e:
if e.errno != _errno.ENOENT:
raise
def close_on_exec(descriptor, close=True):
"""
Mark `descriptor` to be closed on exec (or not)
:Warning: This function is not thread safe (race condition)
:Parameters:
- `descriptor`: An object with ``fileno`` method or an ``int``
representing a low level file descriptor
- `close`: Mark being closed on exec?
:Types:
- `descriptor`: ``file`` or ``int``
- `close`: ``bool``
:Exceptions:
- `IOError`: Something went wrong
"""
try:
fileno = descriptor.fileno
except AttributeError:
fd = descriptor
else:
fd = fileno()
old = _fcntl.fcntl(fd, _fcntl.F_GETFD)
if close:
new = old | _fcntl.FD_CLOEXEC
else:
new = old & ~_fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, new)
def safe_fd(fd):
"""
Ensure that file descriptor fd is >= 3
This is done by dup(2) calls until it's greater than 2. After success
the duped descriptors are closed.
:Parameters:
- `fd`: The file descriptor to process
:Types:
- `fd`: ``int``
:return: The new file descriptor (>=3)
:rtype: ``int``
:Exceptions:
- `OSError`: Duping went wrong
"""
toclose = []
try:
while fd < 3:
toclose.append(fd)
fd = _os.dup(fd)
finally:
for dfd in toclose:
try:
_os.close(dfd)
except OSError:
pass
return fd
def close_descriptors(*keep):
""" Close all file descriptors >= 3 """
keep = set(keep)
try:
flag = _resource.RLIMIT_NOFILE
except AttributeError:
try:
flag = _resource.RLIMIT_OFILE
except AttributeError:
flag = None
if flag is not None:
try:
maxfiles = _resource.getrlimit(flag)[0]
except (_resource.error, ValueError):
flag = None
if flag is None:
maxfiles = 256 # wild guess
for fd in xrange(3, maxfiles + 1):
if fd in keep:
continue
try:
_os.close(fd)
except OSError:
pass
try:
_myflag = _socket.TCP_NODELAY
except AttributeError:
def disable_nagle(sock, peername=None):
"""
Disable nagle algorithm for a TCP socket
:Note: This function is a NOOP on this platform (not implemented).
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
return sock, peername
else:
def disable_nagle(sock, peername=None, _flag=_myflag):
"""
Disable nagle algorithm for a TCP socket
:Parameters:
- `sock`: Socket to process
- `peername`: The name of the remote socket, if ``str``, it's a UNIX
domain socket and the function does nothing
:Types:
- `sock`: ``socket.socket``
- `peername`: ``str`` or ``tuple``
:return: The socket and the peername again (if the latter was passed
as ``None``, it will be set to something useful
:rtype: ``tuple``
:Exceptions:
- `socket.error`: The socket was probably not connected. If setting
of the option fails, no socket error is thrown though. It's ignored.
"""
if peername is None:
peername = sock.getpeername()
if not isinstance(peername, str):
try:
sock.setsockopt(_socket.IPPROTO_TCP, _flag, 1)
except _socket.error:
pass # would have been nice, but, well, not that critical
return sock, peername
_connect_cache = {}
_connect_cache_lock = _threading.Lock()
def connect(spec, timeout=None, nagle_off=True, cache=0,
_cache=_connect_cache, _lock=_connect_cache_lock):
"""
Create and connect a socket to a peer
:Parameters:
- `spec`: The peer specification (``(host, port)`` or ``str``)
- `timeout`: Timeout in seconds
- `nagle_off`: Disable Nagle's algorithm. This option does not
apply to UNIX domain sockets.
:Types:
- `spec`: ``tuple`` or ``str``
- `timeout`: ``float``
- `nagle_off`: ``bool``
:return: The connected socket or ``None`` if no connectable address
could be found
:rtype: ``socket.socket``
:Exceptions:
- `SocketError`: socket error (maybe a subclass of `SocketError`)
- `NotImplementedError`: UNIX domain sockets are not supported in this
platform
"""
# pylint: disable = W0102, R0912, R0915
sock = None
try:
adi = None
if cache > 0:
_lock.acquire()
try:
if spec in _cache:
adi, stamp = _cache[spec]
if stamp < _datetime.datetime.utcnow():
del _cache[spec]
adi = None
finally:
_lock.release()
if adi is None:
if isinstance(spec, str):
try:
AF_UNIX = _socket.AF_UNIX
except AttributeError:
raise NotImplementedError(
"UNIX domain sockets are not supported"
)
adi = [(AF_UNIX, _socket.SOCK_STREAM, 0, None, spec)]
else:
adi = _socket.getaddrinfo(spec[0], spec[1],
_socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, 0)
if cache > 0:
_lock.acquire()
try:
if spec not in _cache:
_cache[spec] = (
adi,
_datetime.datetime.utcnow()
+ _datetime.timedelta(seconds=cache),
)
finally:
_lock.release()
AF_INET6 = getattr(_socket, 'AF_INET6', None)
for family, stype, proto, _, addr in adi:
if not _socket.has_ipv6 and family == AF_INET6:
continue # skip silently if Python was built without it.
sock = _socket.socket(family, stype, proto)
sock.settimeout(timeout)
retry = True
while retry:
try:
sock.connect(addr)
except _socket.timeout:
break
except _socket.error, e:
if e[0] == _errno.EINTR:
continue
elif e[0] in (_errno.ENETUNREACH, _errno.ECONNREFUSED):
break
raise
retry = False
else:
if nagle_off:
disable_nagle(sock)
return sock
sock.close()
except (_socket.error, IOError):
try:
raise_socket_error(timeout=timeout)
except SocketError:
e = _sys.exc_info()
try:
if sock is not None:
sock.close()
finally:
try:
raise e[0], e[1], e[2]
finally:
del e
return None
del _connect_cache, _connect_cache_lock
def change_identity(user, group):
"""
Change identity of the current process
This only works if the effective user ID of the current process is 0.
:Parameters:
- `user`: User identification, if it is interpretable as ``int``, it's
assumed to be a numeric user ID
- `group`: Group identification, if it is interpretable as ``int``, it's
assumed to be a numeric group ID
:Types:
- `user`: ``str``
- `group`: ``str``
:Exceptions:
- `IdentityWarning`: A soft error occurred (like not being root)
"""
if _os.geteuid() != 0:
_warnings.warn("Not attempting to change identity (not root)",
category=IdentityWarning)
return
user, group = str(user), str(group)
# resolve user
import pwd
try:
try:
userid = int(user)
except (TypeError, ValueError):
userid = pwd.getpwnam(user).pw_uid
else:
user = pwd.getpwuid(userid).pw_name
except KeyError, e:
raise IdentityError(
"User resolution problem of %r: %s" % (user, str(e))
)
# resolve group
import grp
try:
try:
groupid = int(group)
except (TypeError, ValueError):
groupid = grp.getgrnam(group).gr_gid
else:
group = grp.getgrgid(groupid).gr_name
except KeyError, e:
raise IdentityError(
"Group resolution problem of %r: %s" % (group, str(e))
)
# now do change our identity; group first as we might not have the
# permissions to do so after we left the power of root behind us.
_os.setgid(groupid)
try:
initgroups(user, groupid)
except NotImplementedError:
_warnings.warn("initgroups(3) is not implemented. You have to run "
"without supplemental groups or compile the wtf package "
"properly.", category=IdentityWarning)
_os.setuid(userid)
def initgroups(username, gid):
"""
Implement initgroups(3)
:Parameters:
- `username`: The user name
- `gid`: The group id
:Types:
- `username`: ``str``
- `gid`: ``int``
:Exceptions:
- `OSError`: initgroups() didn't succeed
- `NotImplementedError`: initgroups is not implemented
(needs c-extension)
"""
# pylint: disable = W0613
raise NotImplementedError()
from wtf import c_override
cimpl = c_override('_wtf_cutil')
if cimpl is not None:
# pylint: disable = E1103
initgroups = cimpl.initgroups
del c_override, cimpl
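# The lines above implement a stub-override pattern: the pure-Python initgroups
# always raises NotImplementedError, and if the optional C extension is
# importable its native implementation replaces the stub. A minimal sketch of
# the same idea (illustrative only, names assumed):
#
#     def feature(*args):
#         raise NotImplementedError()
#     try:
#         import _native_impl
#         feature = _native_impl.feature
#     except ImportError:
#         pass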
| IdentityError | identifier_name |
sshCopy.py | #!/usr/bin/python
import paramiko
import sys
import os
import string
import threading
import subprocess
import time
import select
import datetime
from os.path import expanduser
import qs
global threads
threads = []
global upload
upload = False
class FileCopy:
def __init__(self):
self.numCams = 21
self.hosts = self.hostInit()
self.filesTransferred = 0
self.filesToTransfer = 0
def hostInit(self):
hosts = []
for i in range(1, 1 + self.numCams):
num = "%02d" % (i,)
hosts.append('192.168.0.2' + num)
return hosts
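    # Illustrative note: with numCams = 21 the list above is
    # ['192.168.0.201', '192.168.0.202', ..., '192.168.0.221'].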
#pings the address
#has a really short timeout. With the devices on a local network the connected devices respond really fast. So there is no need to take extra time.
def ping(address):
# print (address)
try:
output = subprocess.check_output(["ping.exe", "-n", "1", "-w", "1", str(address)])
# on windows a ping that doesn't get a response does not return -1, unless it fails because of a timeout
if (b'unreachable' in output):
# print("Offline")
return False
else:
# print ("online")
return True
except Exception as e:
# a timeout will return -1
if('non-zero' in str(e)):
pass
# print ('timedout')
else:
print (e)
# print ("offline / didnt work")
return False
def workon(host,localDir, indexStart):
if ping(host):
# print (host)
ssh = paramiko.SSHClient()
# print ('client created' + str(host))
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# print ('set missing key policy' + str(host))
ssh.connect(host, username='pi', password='biomech1')
print ('connected' + str(host))
#######
# setup connection to pi
#########
sftp = ssh.open_sftp()
piDir = '/home/pi/piTemp'
#######
# copy files from raspi
##########
copyFiles(sftp, piDir, host, localDir, indexStart)
else:
pass
def copyFiles(sftp, piDir, host, localDir, indexStart):
fileList = sftp.listdir(piDir)
sortedFiles = sorted(fileList)
fileListLength = len(sortedFiles)
index = indexStart
allCopied = True #this gets set to false if there is an exception
for count, file in enumerate(sortedFiles):
try:
print ('trying to get file ' + str(count + 1) + ' of ' + str(fileListLength) + ' from ' + host[10:13] )
indexString = str(index)
if(index < 10):
indexString = "00" + str(index)
elif(index > 9 and index < 100):
indexString = "0" + str(index)
#grab the file from the pi, add index & host name to the file.
sftp.get((piDir + '/' +file),(localDir + '/' + host[10:13] + '_' + indexString + '_' + rmvIlligal(file) + '.jpg'))
index += 1
except Exception as e:
allCopied = False
print (str(e))
            print ('could not get photo ' + file + ' from host ' + host[10:13])
    # if all the photos were successfully copied then delete the originals
if(allCopied):
for file in sortedFiles:
try:
sftp.remove((piDir + '/' + file))
print (host[10:13] + ' ' + file + ' removed')
except Exception as e:
print (e)
print ("done " + host)
def rmvIlligal(input):
# print (input)
valid_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
output = ''
for c in input:
if c in valid_chars:
output += c
length = len(output)
return output[0:11]
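# Illustrative example (not part of the original script): rmvIlligal keeps only
# letters, digits, '-', '_', '(' and ')' and truncates to 11 characters, so an
# input such as 'cam01 shot#1.jpg' becomes 'cam01shot1j'.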
class FolderName():
def __init__(self):
self.subjectIdentifier = None
self.increment = None
self.homeDir = self.getDirectory()
self.lastFolder = self.getLastFolder()
self.date = self.getDate()
self.newFolderName = ""
self.useOldFolder = False
# check the home directory
# setup scanFolder in the documents folder if it doesn't already exist
def getDirectory(self):
home = expanduser("~")
        scanFolder = r'\Documents\ScanFolder'
homeDir = home + scanFolder
if not os.path.exists(homeDir):
os.mkdir(homeDir)
return homeDir
    # this is its own function in case the format of the date needs to be changed
def getDate(self):
date = str(datetime.date.today())
# date = date.replace("-", "_")
return date
#check scanfolder directory for the most recent folder
def getLastFolder(self):
folder = list(os.scandir(self.homeDir))
# check for most recent folder name
# if the most recent folder is a calibration folder then ignore it.
if( 0 < len(folder)):
sortedFolders = sorted(folder, key = lambda x: x.stat().st_mtime, reverse = True) #sort the folders by time they were modified
#if the most recent file is a calibration file, then ignore it.
while('s' not in sortedFolders[0].name):
sortedFolders.pop(0)
if(0 == len(sortedFolders)):
return None
oldSplit = sortedFolders[0].name.split('_')
if(4 == len(oldSplit)):
self.subjectIdentifier = oldSplit[0]
self.collectedDataType = oldSplit[2]
self.increment = oldSplit[3]
return sortedFolders[0].name
else:
return None
else:
print (" *** There are no previous files *** ")
return None
# Checks the user input
    # determines the folder name
# if the input is valid it returns True
def checkInput(self,userInput):
splitInput = userInput
# check if input is 'enter', this should then use the most recent folder as the current folder
if [] == splitInput:
self.newFolderName = self.homeDir + '\\' + self.lastFolder
self.useOldFolder = True
return True
        #check if the first term entered is the subject identifier (s1, s2, etc.)
        #store the subject identifier, then remove it from the list.
        #check for additional filename info
if 's' == splitInput[0][0]:
if( 1 < len(splitInput[0]) and splitInput[0][1].isdigit()):
self.subjectIdentifier = splitInput[0]
splitInput.pop(0)
checkData = splitInput.pop(0)
# increment folder by letter if input is 'inc'/ or set increment to 'a'
if 'inc' == checkData and not None == self.increment:
self.increment = chr(ord(self.increment) + 1)
print (self.increment)
return True
else:
self.increment = 'a'
#muscle Contractions
if 'mc' == checkData or 'ms' == checkData or 'mus' in checkData:
self.collectedDataType = "muscleContractions"
return True
#swelling: cast or socket or just swelling
elif 's' == checkData or'sw' in checkData:
self.collectedDataType = "swelling"
if(0 < len(splitInput)):
if 'c' == splitInput[0] or 'cast' == splitInput[0]:
self.collectedDataType +="Cast"
elif 's' == splitInput[0] or 'so' in splitInput[0]:
self.collectedDataType += "Socket"
return True
elif 'sc' == checkData:
self.collectedDataType = "swellingCast"
return True
elif 'ss' == checkData:
self.collectedDataType = "swellingSocket"
return True
#reference | self.collectedDataType = "ref"
return True
#indentations
elif 'i' == checkData or 'in' == checkData or 'ind' in checkData:
self.collectedDataType = "indentation"
if(0 < len(splitInput)):
if splitInput[0].isdigit():
self.collectedDataType += '-' + splitInput[0]
return True
elif 'test' == checkData:
self.collectedDataType = 'test'
self.subjectIdentifier = 'test'
else:
return False
# Check if all pieces of the foldername exist
# generates folder & returns True or returns false if it cant
def generateFolder(self):
if self.useOldFolder:
return True
elif None == self.subjectIdentifier or None == self.collectedDataType or None == self.increment:
return False
else:
while (os.path.exists(self.generateFolderName())):
                self.increment = chr(ord(self.increment) + 1)
os.mkdir(self.newFolderName)
return True
def generateFolderName(self):
self.newFolderName = self.homeDir + '\\' + self.subjectIdentifier + '_' + self.date + '_' + self.collectedDataType + '_' + self.increment
return self.newFolderName
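    # Illustrative result (values assumed): with subjectIdentifier 's1', date
    # '2021-07-04', collectedDataType 'swellingCast' and increment 'a', the
    # generated path ends in 's1_2021-07-04_swellingCast_a'.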
    # files are indexed by time (001, 002, 003 etc); if files are copied to a folder that already has images in it, this checks the index of the highest existing file
def indexLocal(self):
fileList = os.listdir(self.newFolderName)
#for an empty folder set index of 1
if(0 == len(fileList)):
return 1
else:
indexList = []
# print (fileList)
for item in fileList:
# print (item)
itemLen = len(item)
itemIndex = item[4:itemLen-16]
indexList.append(int(itemIndex))
# print (indexList)
# print ("sorted")
sortedIndex = sorted(indexList)
print (sortedIndex[-1])
return (sortedIndex[-1] + 1)
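    # Note on the slice above (illustrative, based on how copyFiles names files):
    # copied images look like '<3-char host>_<3-digit index>_<11-char name>.jpg',
    # e.g. '201_005_cam01shot1j.jpg', so item[4:itemLen-16] extracts the
    # zero-padded index ('005') between the host prefix and the fixed-length suffix.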
# This is the string the user sees when starting the program.
# It provides instructions of the legal inputs
introString = """
File copying instructions:
If this is the first file for this subject enter the subject identifier eg. 's1' followed by the following folder type.
The subject identifier can be left out if the previous folder has the identifier.
Folder types:
test: enter 'test' and both subject identifier and data type will be set to 'test'
ref: enter 'r' or 'ref'
muscle contractions: enter 'mc', 'ms', or string that includes 'mus' such as 'muscle'
swelling: enter 's', 'sw', 'swell', or swelling
for swelling cast enter 'sc' or enter the above followed by ' c'
for swelling socket enter 'ss' or enter the above followed by ' s'
indentation: enter 'i', in', or string including 'ind' followed by a space and the indentation number
To increment the previous folder either enter 'inc'
To increment an earlier folder (that is not the most recent folder) enter the folder type and it will be auto incremented.
To copy again to the most recent folder hit enter with no arguments.
enter "cntrl c" to quit
"""
def foldersetup(argv):
argv.pop(0)
myfolder = FolderName()
if(0 < len(argv)):
myfolder.checkInput(argv)
else:
print (introString)
try:
print ('The most recent folder is *** ' + myfolder.lastFolder + ' ***')
except:
pass
myfolder.checkInput(input().split())
if myfolder.generateFolder():
print ("generating new folder")
print (myfolder.newFolderName)
return myfolder
else:
return None
def main():
# threads = []
folder = foldersetup(sys.argv)
path = folder.newFolderName
if(not (None == path)):
fileCopier = FileCopy()
index = folder.indexLocal()
for h in fileCopier.hosts:
t = threading.Thread(target=workon, args=(h, path, index))
t.start()
threads.append(t)
for t in threads:
            t.join()
if __name__ == "__main__":
print ("sshCopy is main")
main()
# clusterssh [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] | elif 'r' == checkData or 're' in checkData: | random_line_split |
sshCopy.py | #!/usr/bin/python
import paramiko
import sys
import os
import string
import threading
import subprocess
import time
import select
import datetime
from os.path import expanduser
import qs
global threads
threads = []
global upload
upload = False
class FileCopy:
def __init__(self):
self.numCams = 21
self.hosts = self.hostInit()
self.filesTransferred = 0
self.filesToTransfer = 0
def hostInit(self):
hosts = []
for i in range(1, 1 + self.numCams):
num = "%02d" % (i,)
hosts.append('192.168.0.2' + num)
return hosts
#pings the address
#has a really short timeout. With the devices on a local network the connected devices respond really fast. So there is no need to take extra time.
def ping(address):
# print (address)
try:
output = subprocess.check_output(["ping.exe", "-n", "1", "-w", "1", str(address)])
# on windows a ping that doesn't get a response does not return -1, unless it fails because of a timeout
if (b'unreachable' in output):
# print("Offline")
return False
else:
# print ("online")
return True
except Exception as e:
# a timeout will return -1
if('non-zero' in str(e)):
pass
# print ('timedout')
else:
print (e)
# print ("offline / didnt work")
return False
def workon(host,localDir, indexStart):
if ping(host):
# print (host)
ssh = paramiko.SSHClient()
# print ('client created' + str(host))
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# print ('set missing key policy' + str(host))
ssh.connect(host, username='pi', password='biomech1')
print ('connected' + str(host))
#######
# setup connection to pi
#########
sftp = ssh.open_sftp()
piDir = '/home/pi/piTemp'
#######
# copy files from raspi
##########
copyFiles(sftp, piDir, host, localDir, indexStart)
else:
pass
def copyFiles(sftp, piDir, host, localDir, indexStart):
fileList = sftp.listdir(piDir)
sortedFiles = sorted(fileList)
fileListLength = len(sortedFiles)
index = indexStart
allCopied = True #this gets set to false if there is an exception
for count, file in enumerate(sortedFiles):
try:
print ('trying to get file ' + str(count + 1) + ' of ' + str(fileListLength) + ' from ' + host[10:13] )
indexString = str(index)
if(index < 10):
indexString = "00" + str(index)
elif(index > 9 and index < 100):
indexString = "0" + str(index)
#grab the file from the pi, add index & host name to the file.
sftp.get((piDir + '/' +file),(localDir + '/' + host[10:13] + '_' + indexString + '_' + rmvIlligal(file) + '.jpg'))
index += 1
except Exception as e:
allCopied = False
print (str(e))
            print ('could not get photo ' + file + ' from host ' + host[10:13])
    # if all the photos were successfully copied then delete the originals
if(allCopied):
for file in sortedFiles:
try:
sftp.remove((piDir + '/' + file))
print (host[10:13] + ' ' + file + ' removed')
except Exception as e:
print (e)
print ("done " + host)
def rmvIlligal(input):
# print (input)
valid_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
output = ''
for c in input:
if c in valid_chars:
output += c
length = len(output)
return output[0:11]
class FolderName():
def __init__(self):
self.subjectIdentifier = None
self.increment = None
self.homeDir = self.getDirectory()
self.lastFolder = self.getLastFolder()
self.date = self.getDate()
self.newFolderName = ""
self.useOldFolder = False
# check the home directory
# setup scanFolder in the documents folder if it doesn't already exist
def getDirectory(self):
home = expanduser("~")
        scanFolder = r'\Documents\ScanFolder'
homeDir = home + scanFolder
if not os.path.exists(homeDir):
os.mkdir(homeDir)
return homeDir
    # this is its own function in case the format of the date needs to be changed
def getDate(self):
date = str(datetime.date.today())
# date = date.replace("-", "_")
return date
#check scanfolder directory for the most recent folder
def | (self):
folder = list(os.scandir(self.homeDir))
# check for most recent folder name
# if the most recent folder is a calibration folder then ignore it.
if( 0 < len(folder)):
sortedFolders = sorted(folder, key = lambda x: x.stat().st_mtime, reverse = True) #sort the folders by time they were modified
#if the most recent file is a calibration file, then ignore it.
while('s' not in sortedFolders[0].name):
sortedFolders.pop(0)
if(0 == len(sortedFolders)):
return None
oldSplit = sortedFolders[0].name.split('_')
if(4 == len(oldSplit)):
self.subjectIdentifier = oldSplit[0]
self.collectedDataType = oldSplit[2]
self.increment = oldSplit[3]
return sortedFolders[0].name
else:
return None
else:
print (" *** There are no previous files *** ")
return None
# Checks the user input
    # determines the folder name
# if the input is valid it returns True
def checkInput(self,userInput):
splitInput = userInput
# check if input is 'enter', this should then use the most recent folder as the current folder
if [] == splitInput:
self.newFolderName = self.homeDir + '\\' + self.lastFolder
self.useOldFolder = True
return True
        #check if the first term entered is the subject identifier (s1, s2, etc.)
        #store the subject identifier, then remove it from the list.
        #check for additional filename info
if 's' == splitInput[0][0]:
if( 1 < len(splitInput[0]) and splitInput[0][1].isdigit()):
self.subjectIdentifier = splitInput[0]
splitInput.pop(0)
checkData = splitInput.pop(0)
# increment folder by letter if input is 'inc'/ or set increment to 'a'
if 'inc' == checkData and not None == self.increment:
self.increment = chr(ord(self.increment) + 1)
print (self.increment)
return True
else:
self.increment = 'a'
#muscle Contractions
if 'mc' == checkData or 'ms' == checkData or 'mus' in checkData:
self.collectedDataType = "muscleContractions"
return True
#swelling: cast or socket or just swelling
elif 's' == checkData or'sw' in checkData:
self.collectedDataType = "swelling"
if(0 < len(splitInput)):
if 'c' == splitInput[0] or 'cast' == splitInput[0]:
self.collectedDataType +="Cast"
elif 's' == splitInput[0] or 'so' in splitInput[0]:
self.collectedDataType += "Socket"
return True
elif 'sc' == checkData:
self.collectedDataType = "swellingCast"
return True
elif 'ss' == checkData:
self.collectedDataType = "swellingSocket"
return True
#reference
elif 'r' == checkData or 're' in checkData:
self.collectedDataType = "ref"
return True
#indentations
elif 'i' == checkData or 'in' == checkData or 'ind' in checkData:
self.collectedDataType = "indentation"
if(0 < len(splitInput)):
if splitInput[0].isdigit():
self.collectedDataType += '-' + splitInput[0]
return True
elif 'test' == checkData:
self.collectedDataType = 'test'
self.subjectIdentifier = 'test'
else:
return False
# Check if all pieces of the foldername exist
# generates folder & returns True or returns false if it cant
def generateFolder(self):
if self.useOldFolder:
return True
elif None == self.subjectIdentifier or None == self.collectedDataType or None == self.increment:
return False
else:
while (os.path.exists(self.generateFolderName())):
                self.increment = chr(ord(self.increment) + 1)
os.mkdir(self.newFolderName)
return True
def generateFolderName(self):
self.newFolderName = self.homeDir + '\\' + self.subjectIdentifier + '_' + self.date + '_' + self.collectedDataType + '_' + self.increment
return self.newFolderName
    # files are indexed by time (001, 002, 003 etc); if files are copied to a folder that already has images in it, this checks the index of the highest existing file
def indexLocal(self):
fileList = os.listdir(self.newFolderName)
#for an empty folder set index of 1
if(0 == len(fileList)):
return 1
else:
indexList = []
# print (fileList)
for item in fileList:
# print (item)
itemLen = len(item)
itemIndex = item[4:itemLen-16]
indexList.append(int(itemIndex))
# print (indexList)
# print ("sorted")
sortedIndex = sorted(indexList)
print (sortedIndex[-1])
return (sortedIndex[-1] + 1)
# This is the string the user sees when starting the program.
# It provides instructions of the legal inputs
introString = """
File copying instructions:
If this is the first file for this subject enter the subject identifier eg. 's1' followed by the following folder type.
The subject identifier can be left out if the previous folder has the identifier.
Folder types:
test: enter 'test' and both subject identifier and data type will be set to 'test'
ref: enter 'r' or 'ref'
muscle contractions: enter 'mc', 'ms', or string that includes 'mus' such as 'muscle'
swelling: enter 's', 'sw', 'swell', or swelling
for swelling cast enter 'sc' or enter the above followed by ' c'
for swelling socket enter 'ss' or enter the above followed by ' s'
indentation: enter 'i', in', or string including 'ind' followed by a space and the indentation number
To increment the previous folder either enter 'inc'
To increment an earlier folder (that is not the most recent folder) enter the folder type and it will be auto incremented.
To copy again to the most recent folder hit enter with no arguments.
enter "cntrl c" to quit
"""
def foldersetup(argv):
argv.pop(0)
myfolder = FolderName()
if(0 < len(argv)):
myfolder.checkInput(argv)
else:
print (introString)
try:
print ('The most recent folder is *** ' + myfolder.lastFolder + ' ***')
except:
pass
myfolder.checkInput(input().split())
if myfolder.generateFolder():
print ("generating new folder")
print (myfolder.newFolderName)
return myfolder
else:
return None
def main():
# threads = []
folder = foldersetup(sys.argv)
path = folder.newFolderName
if(not (None == path)):
fileCopier = FileCopy()
index = folder.indexLocal()
for h in fileCopier.hosts:
t = threading.Thread(target=workon, args=(h, path, index))
t.start()
threads.append(t)
for t in threads:
            t.join()
if __name__ == "__main__":
print ("sshCopy is main")
main()
# clusterssh [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected]
| getLastFolder | identifier_name |
sshCopy.py | #!/usr/bin/python
import paramiko
import sys
import os
import string
import threading
import subprocess
import time
import select
import datetime
from os.path import expanduser
import qs
global threads
threads = []
global upload
upload = False
class FileCopy:
def __init__(self):
self.numCams = 21
self.hosts = self.hostInit()
self.filesTransferred = 0
self.filesToTransfer = 0
def hostInit(self):
hosts = []
for i in range(1, 1 + self.numCams):
num = "%02d" % (i,)
hosts.append('192.168.0.2' + num)
return hosts
#pings the address
#has a really short timeout. With the devices on a local network the connected devices respond really fast. So there is no need to take extra time.
def ping(address):
# print (address)
try:
output = subprocess.check_output(["ping.exe", "-n", "1", "-w", "1", str(address)])
# on windows a ping that doesn't get a response does not return -1, unless it fails because of a timeout
if (b'unreachable' in output):
# print("Offline")
return False
else:
# print ("online")
return True
except Exception as e:
# a timeout will return -1
if('non-zero' in str(e)):
pass
# print ('timedout')
else:
print (e)
# print ("offline / didnt work")
return False
def workon(host,localDir, indexStart):
if ping(host):
# print (host)
ssh = paramiko.SSHClient()
# print ('client created' + str(host))
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# print ('set missing key policy' + str(host))
ssh.connect(host, username='pi', password='biomech1')
print ('connected' + str(host))
#######
# setup connection to pi
#########
sftp = ssh.open_sftp()
piDir = '/home/pi/piTemp'
#######
# copy files from raspi
##########
copyFiles(sftp, piDir, host, localDir, indexStart)
else:
pass
def copyFiles(sftp, piDir, host, localDir, indexStart):
fileList = sftp.listdir(piDir)
sortedFiles = sorted(fileList)
fileListLength = len(sortedFiles)
index = indexStart
allCopied = True #this gets set to false if there is an exception
for count, file in enumerate(sortedFiles):
try:
print ('trying to get file ' + str(count + 1) + ' of ' + str(fileListLength) + ' from ' + host[10:13] )
indexString = str(index)
if(index < 10):
indexString = "00" + str(index)
elif(index > 9 and index < 100):
indexString = "0" + str(index)
#grab the file from the pi, add index & host name to the file.
sftp.get((piDir + '/' +file),(localDir + '/' + host[10:13] + '_' + indexString + '_' + rmvIlligal(file) + '.jpg'))
index += 1
except Exception as e:
allCopied = False
print (str(e))
            print ('could not get photo ' + file + ' from host ' + host[10:13])
    # if all the photos were successfully copied then delete the originals
if(allCopied):
for file in sortedFiles:
try:
sftp.remove((piDir + '/' + file))
print (host[10:13] + ' ' + file + ' removed')
except Exception as e:
print (e)
print ("done " + host)
def rmvIlligal(input):
# print (input)
valid_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
output = ''
for c in input:
if c in valid_chars:
output += c
length = len(output)
return output[0:11]
class FolderName():
|
# This is the string the user sees when starting the program.
# It provides instructions of the legal inputs
introString = """
File copying instructions:
If this is the first file for this subject enter the subject identifier eg. 's1' followed by the following folder type.
The subject identifier can be left out if the previous folder has the identifier.
Folder types:
test: enter 'test' and both subject identifier and data type will be set to 'test'
ref: enter 'r' or 'ref'
muscle contractions: enter 'mc', 'ms', or string that includes 'mus' such as 'muscle'
swelling: enter 's', 'sw', 'swell', or swelling
for swelling cast enter 'sc' or enter the above followed by ' c'
for swelling socket enter 'ss' or enter the above followed by ' s'
indentation: enter 'i', in', or string including 'ind' followed by a space and the indentation number
To increment the previous folder either enter 'inc'
To increment an earlier folder (that is not the most recent folder) enter the folder type and it will be auto incremented.
To copy again to the most recent folder hit enter with no arguments.
enter "cntrl c" to quit
"""
def foldersetup(argv):
argv.pop(0)
myfolder = FolderName()
if(0 < len(argv)):
myfolder.checkInput(argv)
else:
print (introString)
try:
print ('The most recent folder is *** ' + myfolder.lastFolder + ' ***')
except:
pass
myfolder.checkInput(input().split())
if myfolder.generateFolder():
print ("generating new folder")
print (myfolder.newFolderName)
return myfolder
else:
return None
def main():
# threads = []
folder = foldersetup(sys.argv)
path = folder.newFolderName
if(not (None == path)):
fileCopier = FileCopy()
index = folder.indexLocal()
for h in fileCopier.hosts:
t = threading.Thread(target=workon, args=(h, path, index))
t.start()
threads.append(t)
for t in threads:
            t.join()
if __name__ == "__main__":
print ("sshCopy is main")
main()
# clusterssh [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected]
| def __init__(self):
self.subjectIdentifier = None
self.increment = None
self.homeDir = self.getDirectory()
self.lastFolder = self.getLastFolder()
self.date = self.getDate()
self.newFolderName = ""
self.useOldFolder = False
# check the home directory
# setup scanFolder in the documents folder if it doesn't already exist
def getDirectory(self):
home = expanduser("~")
        scanFolder = r'\Documents\ScanFolder'
homeDir = home + scanFolder
if not os.path.exists(homeDir):
os.mkdir(homeDir)
return homeDir
    # this is its own function in case the format of the date needs to be changed
def getDate(self):
date = str(datetime.date.today())
# date = date.replace("-", "_")
return date
#check scanfolder directory for the most recent folder
def getLastFolder(self):
folder = list(os.scandir(self.homeDir))
# check for most recent folder name
# if the most recent folder is a calibration folder then ignore it.
if( 0 < len(folder)):
sortedFolders = sorted(folder, key = lambda x: x.stat().st_mtime, reverse = True) #sort the folders by time they were modified
#if the most recent file is a calibration file, then ignore it.
while('s' not in sortedFolders[0].name):
sortedFolders.pop(0)
if(0 == len(sortedFolders)):
return None
oldSplit = sortedFolders[0].name.split('_')
if(4 == len(oldSplit)):
self.subjectIdentifier = oldSplit[0]
self.collectedDataType = oldSplit[2]
self.increment = oldSplit[3]
return sortedFolders[0].name
else:
return None
else:
print (" *** There are no previous files *** ")
return None
# Checks the user input
    # determines the folder name
# if the input is valid it returns True
def checkInput(self,userInput):
splitInput = userInput
# check if input is 'enter', this should then use the most recent folder as the current folder
if [] == splitInput:
self.newFolderName = self.homeDir + '\\' + self.lastFolder
self.useOldFolder = True
return True
        #check if the first term entered is the subject identifier (s1, s2, etc.)
        #store the subject identifier, then remove it from the list.
        #check for additional filename info
if 's' == splitInput[0][0]:
if( 1 < len(splitInput[0]) and splitInput[0][1].isdigit()):
self.subjectIdentifier = splitInput[0]
splitInput.pop(0)
checkData = splitInput.pop(0)
# increment folder by letter if input is 'inc'/ or set increment to 'a'
if 'inc' == checkData and not None == self.increment:
self.increment = chr(ord(self.increment) + 1)
print (self.increment)
return True
else:
self.increment = 'a'
#muscle Contractions
if 'mc' == checkData or 'ms' == checkData or 'mus' in checkData:
self.collectedDataType = "muscleContractions"
return True
#swelling: cast or socket or just swelling
elif 's' == checkData or'sw' in checkData:
self.collectedDataType = "swelling"
if(0 < len(splitInput)):
if 'c' == splitInput[0] or 'cast' == splitInput[0]:
self.collectedDataType +="Cast"
elif 's' == splitInput[0] or 'so' in splitInput[0]:
self.collectedDataType += "Socket"
return True
elif 'sc' == checkData:
self.collectedDataType = "swellingCast"
return True
elif 'ss' == checkData:
self.collectedDataType = "swellingSocket"
return True
#reference
elif 'r' == checkData or 're' in checkData:
self.collectedDataType = "ref"
return True
#indentations
elif 'i' == checkData or 'in' == checkData or 'ind' in checkData:
self.collectedDataType = "indentation"
if(0 < len(splitInput)):
if splitInput[0].isdigit():
self.collectedDataType += '-' + splitInput[0]
return True
elif 'test' == checkData:
self.collectedDataType = 'test'
self.subjectIdentifier = 'test'
else:
return False
# Check if all pieces of the foldername exist
# generates folder & returns True or returns false if it cant
def generateFolder(self):
if self.useOldFolder:
return True
elif None == self.subjectIdentifier or None == self.collectedDataType or None == self.increment:
return False
else:
while (os.path.exists(self.generateFolderName())):
                self.increment = chr(ord(self.increment) + 1)
os.mkdir(self.newFolderName)
return True
def generateFolderName(self):
self.newFolderName = self.homeDir + '\\' + self.subjectIdentifier + '_' + self.date + '_' + self.collectedDataType + '_' + self.increment
return self.newFolderName
    # files are indexed by time (001, 002, 003 etc); if files are copied to a folder that already has images in it, this checks the index of the highest existing file
def indexLocal(self):
fileList = os.listdir(self.newFolderName)
#for an empty folder set index of 1
if(0 == len(fileList)):
return 1
else:
indexList = []
# print (fileList)
for item in fileList:
# print (item)
itemLen = len(item)
itemIndex = item[4:itemLen-16]
indexList.append(int(itemIndex))
# print (indexList)
# print ("sorted")
sortedIndex = sorted(indexList)
print (sortedIndex[-1])
return (sortedIndex[-1] + 1) | identifier_body |
sshCopy.py | #!/usr/bin/python
import paramiko
import sys
import os
import string
import threading
import subprocess
import time
import select
import datetime
from os.path import expanduser
import qs
global threads
threads = []
global upload
upload = False
class FileCopy:
def __init__(self):
self.numCams = 21
self.hosts = self.hostInit()
self.filesTransferred = 0
self.filesToTransfer = 0
def hostInit(self):
hosts = []
for i in range(1, 1 + self.numCams):
num = "%02d" % (i,)
hosts.append('192.168.0.2' + num)
return hosts
#pings the address
#has a really short timeout. With the devices on a local network the connected devices respond really fast. So there is no need to take extra time.
def ping(address):
# print (address)
try:
output = subprocess.check_output(["ping.exe", "-n", "1", "-w", "1", str(address)])
# on windows a ping that doesn't get a response does not return -1, unless it fails because of a timeout
if (b'unreachable' in output):
# print("Offline")
return False
else:
# print ("online")
return True
except Exception as e:
# a timeout will return -1
if('non-zero' in str(e)):
pass
# print ('timedout')
else:
print (e)
# print ("offline / didnt work")
return False
def workon(host,localDir, indexStart):
if ping(host):
# print (host)
ssh = paramiko.SSHClient()
# print ('client created' + str(host))
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# print ('set missing key policy' + str(host))
ssh.connect(host, username='pi', password='biomech1')
print ('connected' + str(host))
#######
# setup connection to pi
#########
sftp = ssh.open_sftp()
piDir = '/home/pi/piTemp'
#######
# copy files from raspi
##########
copyFiles(sftp, piDir, host, localDir, indexStart)
else:
pass
def copyFiles(sftp, piDir, host, localDir, indexStart):
fileList = sftp.listdir(piDir)
sortedFiles = sorted(fileList)
fileListLength = len(sortedFiles)
index = indexStart
allCopied = True #this gets set to false if there is an exception
for count, file in enumerate(sortedFiles):
try:
print ('trying to get file ' + str(count + 1) + ' of ' + str(fileListLength) + ' from ' + host[10:13] )
indexString = str(index)
if(index < 10):
indexString = "00" + str(index)
elif(index > 9 and index < 100):
indexString = "0" + str(index)
#grab the file from the pi, add index & host name to the file.
sftp.get((piDir + '/' +file),(localDir + '/' + host[10:13] + '_' + indexString + '_' + rmvIlligal(file) + '.jpg'))
index += 1
except Exception as e:
allCopied = False
print (str(e))
            print ('could not get photo ' + file + ' from host ' + host[10:13])
    # if all the photos were successfully copied then delete the originals
if(allCopied):
for file in sortedFiles:
try:
sftp.remove((piDir + '/' + file))
print (host[10:13] + ' ' + file + ' removed')
except Exception as e:
print (e)
print ("done " + host)
def rmvIlligal(input):
# print (input)
valid_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
output = ''
for c in input:
if c in valid_chars:
output += c
length = len(output)
return output[0:11]
class FolderName():
def __init__(self):
self.subjectIdentifier = None
self.increment = None
self.homeDir = self.getDirectory()
self.lastFolder = self.getLastFolder()
self.date = self.getDate()
self.newFolderName = ""
self.useOldFolder = False
# check the home directory
# setup scanFolder in the documents folder if it doesn't already exist
def getDirectory(self):
home = expanduser("~")
        scanFolder = r'\Documents\ScanFolder'
homeDir = home + scanFolder
if not os.path.exists(homeDir):
os.mkdir(homeDir)
return homeDir
    # this is its own function in case the format of the date needs to be changed
def getDate(self):
date = str(datetime.date.today())
# date = date.replace("-", "_")
return date
#check scanfolder directory for the most recent folder
def getLastFolder(self):
folder = list(os.scandir(self.homeDir))
# check for most recent folder name
# if the most recent folder is a calibration folder then ignore it.
if( 0 < len(folder)):
sortedFolders = sorted(folder, key = lambda x: x.stat().st_mtime, reverse = True) #sort the folders by time they were modified
#if the most recent file is a calibration file, then ignore it.
while('s' not in sortedFolders[0].name):
sortedFolders.pop(0)
if(0 == len(sortedFolders)):
return None
oldSplit = sortedFolders[0].name.split('_')
if(4 == len(oldSplit)):
self.subjectIdentifier = oldSplit[0]
self.collectedDataType = oldSplit[2]
self.increment = oldSplit[3]
return sortedFolders[0].name
else:
return None
else:
print (" *** There are no previous files *** ")
return None
# Checks the user input
    # determines the folder name
# if the input is valid it returns True
def checkInput(self,userInput):
splitInput = userInput
# check if input is 'enter', this should then use the most recent folder as the current folder
if [] == splitInput:
self.newFolderName = self.homeDir + '\\' + self.lastFolder
self.useOldFolder = True
return True
        #check if the first term entered is the subject identifier (s1, s2, etc.)
        #store the subject identifier, then remove it from the list.
        #check for additional filename info
if 's' == splitInput[0][0]:
if( 1 < len(splitInput[0]) and splitInput[0][1].isdigit()):
self.subjectIdentifier = splitInput[0]
splitInput.pop(0)
checkData = splitInput.pop(0)
# increment folder by letter if input is 'inc'/ or set increment to 'a'
if 'inc' == checkData and not None == self.increment:
self.increment = chr(ord(self.increment) + 1)
print (self.increment)
return True
else:
self.increment = 'a'
#muscle Contractions
if 'mc' == checkData or 'ms' == checkData or 'mus' in checkData:
|
#swelling: cast or socket or just swelling
elif 's' == checkData or'sw' in checkData:
self.collectedDataType = "swelling"
if(0 < len(splitInput)):
if 'c' == splitInput[0] or 'cast' == splitInput[0]:
self.collectedDataType +="Cast"
elif 's' == splitInput[0] or 'so' in splitInput[0]:
self.collectedDataType += "Socket"
return True
elif 'sc' == checkData:
self.collectedDataType = "swellingCast"
return True
elif 'ss' == checkData:
self.collectedDataType = "swellingSocket"
return True
#reference
elif 'r' == checkData or 're' in checkData:
self.collectedDataType = "ref"
return True
#indentations
elif 'i' == checkData or 'in' == checkData or 'ind' in checkData:
self.collectedDataType = "indentation"
if(0 < len(splitInput)):
if splitInput[0].isdigit():
self.collectedDataType += '-' + splitInput[0]
return True
elif 'test' == checkData:
self.collectedDataType = 'test'
self.subjectIdentifier = 'test'
else:
return False
# Check if all pieces of the foldername exist
# generates folder & returns True or returns false if it cant
def generateFolder(self):
if self.useOldFolder:
return True
elif None == self.subjectIdentifier or None == self.collectedDataType or None == self.increment:
return False
else:
while (os.path.exists(self.generateFolderName())):
                self.increment = chr(ord(self.increment) + 1)
os.mkdir(self.newFolderName)
return True
def generateFolderName(self):
self.newFolderName = self.homeDir + '\\' + self.subjectIdentifier + '_' + self.date + '_' + self.collectedDataType + '_' + self.increment
return self.newFolderName
    # files are indexed by time (001, 002, 003 etc); if files are copied to a folder that already has images in it, this checks the index of the highest existing file
def indexLocal(self):
fileList = os.listdir(self.newFolderName)
#for an empty folder set index of 1
if(0 == len(fileList)):
return 1
else:
indexList = []
# print (fileList)
for item in fileList:
# print (item)
itemLen = len(item)
itemIndex = item[4:itemLen-16]
indexList.append(int(itemIndex))
# print (indexList)
# print ("sorted")
sortedIndex = sorted(indexList)
print (sortedIndex[-1])
return (sortedIndex[-1] + 1)
# This is the string the user sees when starting the program.
# It provides instructions of the legal inputs
introString = """
File copying instructions:
If this is the first file for this subject enter the subject identifier eg. 's1' followed by the following folder type.
The subject identifier can be left out if the previous folder has the identifier.
Folder types:
test: enter 'test' and both subject identifier and data type will be set to 'test'
ref: enter 'r' or 'ref'
muscle contractions: enter 'mc', 'ms', or string that includes 'mus' such as 'muscle'
swelling: enter 's', 'sw', 'swell', or swelling
for swelling cast enter 'sc' or enter the above followed by ' c'
for swelling socket enter 'ss' or enter the above followed by ' s'
indentation: enter 'i', in', or string including 'ind' followed by a space and the indentation number
To increment the previous folder either enter 'inc'
To increment an earlier folder (that is not the most recent folder) enter the folder type and it will be auto incremented.
To copy again to the most recent folder hit enter with no arguments.
enter "cntrl c" to quit
"""
def foldersetup(argv):
argv.pop(0)
myfolder = FolderName()
if(0 < len(argv)):
myfolder.checkInput(argv)
else:
print (introString)
try:
print ('The most recent folder is *** ' + myfolder.lastFolder + ' ***')
except:
pass
myfolder.checkInput(input().split())
if myfolder.generateFolder():
print ("generating new folder")
print (myfolder.newFolderName)
return myfolder
else:
return None
def main():
# threads = []
folder = foldersetup(sys.argv)
path = folder.newFolderName
if(not (None == path)):
fileCopier = FileCopy()
index = folder.indexLocal()
for h in fileCopier.hosts:
t = threading.Thread(target=workon, args=(h, path, index))
t.start()
threads.append(t)
for t in threads:
            t.join()
if __name__ == "__main__":
print ("sshCopy is main")
main()
# clusterssh [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected] [email protected]
| self.collectedDataType = "muscleContractions"
return True | conditional_block |
DynamicInput.js | /* eslint-disable @typescript-eslint/no-empty-function */
import React, { useState, useEffect, useCallback } from 'react';
import { connect } from 'react-redux';
import PropTypes from 'prop-types';
import lodashGet from 'lodash/get';
import lodashFind from 'lodash/find';
import lodashIsEqual from 'lodash/isEqual';
import classNames from 'classnames';
import lodashDebounce from 'lodash/debounce';
import { useLocale } from 'react-admin';
import { makeStyles } from '@material-ui/core/styles';
import TextInput from '../component/input/TextInput';
import {
isEnterPressed,
isCtrlEnterPressed,
isDownPressed,
isUpPressed,
} from '../helper/FormHelper';
import { getTypeByField, BOOLEAN_FIELD, DATE_FIELD } from '../helper/InputHelper';
import { isEmpty, isEmptyObject, clone } from '../helper/DataHelper';
import { getDropDownListFromState } from '../helper/MetaHelper';
const useStyles = makeStyles(theme => ({
inputStyle: {
width: '100%',
height: '100%',
backgroundColor: theme.palette.background.paper,
'& input': {
padding: 13,
lineHeight: 0,
},
'& label': {
transform: 'scale(1) translate(14px, 16px)',
fontSize: 13,
},
'& div': {
fontSize: 13,
height: '100%',
},
margin: 0,
[theme.breakpoints.down('md')]: {
'& label': {
transform: 'scale(1) translate(14px, 12px)',
},
},
[theme.breakpoints.down('sm')]: {
width: '100%',
margin: 0,
'& input': {
padding: 7,
lineHeight: 0,
},
'& label': {
transform: 'scale(1) translate(14px, 10px)',
fontSize: 10,
},
'& div': {
fontSize: 10,
},
},
},
longInputStyle: {
width: '100%',
height: '100%',
'& > div': {
padding: 13,
},
},
longTextInputStyleNoLabel: {
borderRadius: 4,
width: '100%',
height: '100%',
'& > div': {
padding: 10,
},
},
booleanStyle: {
height: '100%',
border: '1px solid rgba(0, 0, 0, 0.23)',
backgroundColor: theme.palette.background.paper,
borderRadius: theme.shape.borderRadius,
padding: '0 3px',
margin: 0,
width: '100%',
'& label': {
height: '100%',
margin: 0,
'& span': {
fontSize: 13,
},
},
'&[data-focus=false]:hover': {
border: `1px solid`,
},
'&[data-focus=true]': {
border: `1px solid ${theme.palette.primary.main}`,
boxShadow: `inset 0px 0px 0px 1px ${theme.palette.primary.main}`,
},
'&[data-blur=true]': {
boxShadow: 'none',
border: '1px solid rgba(0, 0, 0, 0.23)',
},
position: 'relative',
},
inputStyleNoLabel: {
borderRadius: 4,
width: '100%',
height: '100%',
'&[data-disabled-for-style=true]': {
backgroundColor: theme.palette.grey[300],
},
'& input': {
padding: 10,
lineHeight: 0,
},
},
inputStylePuzzleForm: {
margin: '7px 3px 0',
'&[data-disabled-for-style=true]': {
backgroundColor: theme.palette.grey[300],
},
},
}));
const TRIGGER_SUBMIT_DEBOUNCE = 200;
const defaultGlobalParameters = {};
const DynamicInput = props => {
const [myRef, setMyRef] = useState(null);
const {
dispatch, // get it out
changeFormValue,
addToRelationArray,
field,
source,
resource,
filterMode,
disableDropdownQuickCreate,
disableDropdownSearchPopup,
defaultOperator,
onlyEqualCondition,
getInputRef, // get it out
triggerFormSubmit,
noLabel,
label,
focusOnNextInput,
updateDefaultValue, // get it out
addToIgnoreFocusOnInit, // get it out
getRelationRef, // it should be used by "RelationFormIterator"
recordPath,
inputInPuzzleForm,
inputInQuickCreateDialog,
globalParameters,
alwaysOn,
allowEmpty,
changeFocusOnAnotherRow,
relationResource,
relationSource,
relationInfo,
metaData,
relationPath,
formData,
dropdownState,
findDropdownData,
validationErrors,
initialValues,
viewVersion,
clearValidationErrorForThisField,
isCreateMode,
version,
updateEditedFormData,
additionalProps,
...rest
} = props;
const locale = useLocale();
const classes = useStyles();
const [uiVisibled, setUiVisibled] = useState(true);
const [uiEnabled, setUiEnabled] = useState(true);
const [isLoading, setIsLoading] = useState(false);
const [relatedDropDownValue, setRelatedDropDownValue] = useState({});
const [prevInitialValues, setPrevInitialValues] = useState(initialValues);
const [prevVersion, setPrevVersion] = useState(0);
useEffect(() => {
    // handle initial values
const { name } = field;
const computedVersion = version ? version : viewVersion;
const tabChangeCondition =
prevVersion !== computedVersion &&
prevInitialValues[name] === null &&
initialValues[name] === null;
if (inputInPuzzleForm || inputInQuickCreateDialog || (isCreateMode && computedVersion)) {
if (!lodashIsEqual(prevInitialValues, initialValues) || tabChangeCondition) {
changeFormValue(
name,
!isEmptyObject(initialValues) && !isEmpty(initialValues[name])
? initialValues[name]
: getTypeByField(field) === BOOLEAN_FIELD
? false
: null,
getTypeByField(field) === DATE_FIELD ? true : false,
);
setPrevInitialValues(initialValues);
}
setPrevVersion(computedVersion);
}
}, [initialValues, version]);
// component did mount
useEffect(() => {
const {
defaultValue,
name,
keepFocusAfterSubmit,
keepValueAfterSubmit,
globalParameter,
} = field;
if (!isEmpty(globalParameter)) {
updateDefaultValue(name, globalParameters[globalParameter.toLowerCase()]);
}
if (!isEmpty(defaultValue) || keepValueAfterSubmit) {
const isBoolean = getTypeByField(field) === BOOLEAN_FIELD;
      // because a boolean field's default value is '1' or '0' in the field object,
      // the lines below turn that value into true or false for BOOLEAN_FIELD.
let adoptedDefaultValue = defaultValue;
if (isBoolean && defaultValue == '0') {
adoptedDefaultValue = false;
} else if (isBoolean && defaultValue == '1') {
adoptedDefaultValue = true;
} else {
// if it wasn't a BOOLEAN_FIELD
adoptedDefaultValue = defaultValue;
}
updateDefaultValue(
name,
initialValues && (initialValues[name] || initialValues[name] === false)
? initialValues[name]
: adoptedDefaultValue,
);
}
if (!keepFocusAfterSubmit && typeof addToIgnoreFocusOnInit === 'function') {
addToIgnoreFocusOnInit(name);
}
}, []);
useEffect(() => {
if (field.uiEnable) getRelatedDropdownValue('uiEnable');
if (field.uiVisible) getRelatedDropdownValue('uiVisible');
}, [formData, dropdownState]);
useEffect(() => {
if (field.uiEnable || field.uiVisible) checkUi();
}, [formData, relatedDropDownValue, dropdownState]);
/**
   * check `field` and run `javaScriptUiEnable` and `javaScriptUiVisible` based on `formData` and `relatedDropDownValue`, then call `setUiVisibled` or `setUiEnabled`
* @returns {void}
*/
const checkUi = useCallback(() => {
const { uiVisible, uiEnable, javaScriptUiVisible, javaScriptUiEnable } = field;
if (uiVisible && uiVisible.length && javaScriptUiVisible) {
if (javaScriptUiVisible.indexOf('return') !== -1) {
if (field.uiVisible[0][1] !== '' && relatedDropDownValue) {
try {
const execute = new Function('relatedDropDownValue', `${javaScriptUiVisible}`);
setUiVisibled(execute(clone(relatedDropDownValue)));
} catch (error) {
console.log('javaScriptUiVisible error on %s', field.name, error);
}
} else if (field.uiVisible[0][1] === '' && formData && !isEmptyObject(formData)) |
}
}
if (uiEnable && uiEnable.length && javaScriptUiEnable) {
if (javaScriptUiEnable.indexOf('return') !== -1) {
if (field.uiEnable[0][1] !== '' && relatedDropDownValue) {
try {
const execute = new Function('relatedDropDownValue', `${javaScriptUiEnable}`);
setUiEnabled(execute(clone(relatedDropDownValue)));
} catch (error) {
console.log('javaScriptUiEnable error on %s', field.name, error);
}
} else if (field.uiEnable[0][1] === '' && formData && !isEmptyObject(formData)) {
try {
const execute = new Function('formData', `${javaScriptUiEnable}`);
setUiEnabled(!!execute(clone(formData)));
} catch (error) {
console.log('javaScriptUiEnable error on %s', field.name, error);
}
}
}
}
}, [field, formData, relatedDropDownValue]);
/**
* check field and get dropdown data from redux state
* @param {String} uiCheckType
* @returns {void}
*/
const getRelatedDropdownValue = useCallback(
uiCheckType => {
if (!isLoading && dropdownState === undefined && isEmptyObject(relatedDropDownValue)) {
        // Fetch dropdown data
const fieldName = field[uiCheckType][0][0];
const dropdownMeta = lodashFind(metaData.fields, { name: fieldName }).dropdown;
setIsLoading(true);
} else {
        // Use dropdown data which is available in Redux state
setIsLoading(false);
const fieldName = field[uiCheckType][0][0];
const dropName = field[uiCheckType][0][1];
const selectedDropColId = formData[fieldName] !== null ? +formData[fieldName] : null;
if (selectedDropColId !== null) {
const dropdownSelectedCol = lodashFind(dropdownState, {
id: selectedDropColId,
});
if (dropdownSelectedCol) {
setRelatedDropDownValue({ [dropName]: dropdownSelectedCol[dropName] });
}
} else {
setRelatedDropDownValue({});
}
}
},
[dropdownState, formData],
);
const triggerSubmit = lodashDebounce(() => {
const { triggerFormSubmit } = props;
if (typeof triggerFormSubmit === 'function') {
triggerFormSubmit();
} else {
console.log('triggerFormSubmit is not defined, so can not trigger submit');
}
}, TRIGGER_SUBMIT_DEBOUNCE);
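  // Note: submit is debounced with TRIGGER_SUBMIT_DEBOUNCE (200 ms) so that
  // rapid repeated Ctrl+Enter presses collapse into a single submit call.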
const triggerGoToNext = () => {
const { focusOnNextInput, field } = props;
if (typeof focusOnNextInput === 'function') {
focusOnNextInput(field.name);
}
};
const triggerChangeFocusOnAnotherRow = isMoveToNext => {
const { changeFocusOnAnotherRow, field } = props;
if (typeof changeFocusOnAnotherRow === 'function') {
changeFocusOnAnotherRow(field.name, isMoveToNext);
}
};
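  // Keyboard contract handled below: Ctrl+Enter submits the form, Enter moves
  // focus to the next input, and ArrowUp/ArrowDown move focus to the same
  // field on the previous/next row when row navigation is available.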
const handleKeyDown = event => {
// if form is submitted
if (isCtrlEnterPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerSubmit();
}
// if wants to go to next element
else if (isEnterPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerGoToNext();
} else if (isDownPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerChangeFocusOnAnotherRow(true);
} else if (isUpPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerChangeFocusOnAnotherRow(false);
}
};
/**
   * this function receives a new value and also extracts the field name from props,
   * then handles the form value change and updates the form value, default value and edited form data.
* @function internalOnChange
* @param {string|number|object|Array} value
* @param {string|number|object|Array} previousValue
* @param {string} fieldName
* @returns {void}
*/
const internalOnChange = (value, previousValue, fieldName) => {
const { keepValueAfterSubmit, name } = field;
let tempValue = value;
// eslint-disable-next-line no-prototype-builtins
if (value && typeof value === 'object' && value.hasOwnProperty('nativeEvent')) {
tempValue = value.target.checked; // only BooleanInput
}
// if value should be kept when form is reset, keep value in default value
if (keepValueAfterSubmit) {
updateDefaultValue(name, tempValue);
}
if (typeof changeFormValue === 'function') {
changeFormValue(name, tempValue);
}
// for validation in relation
if (typeof clearValidationErrorForThisField === 'function') {
clearValidationErrorForThisField(name);
}
};
const isFocusable = () => {
const { field, disabled } = props;
return !disabled && !field.readOnly && !field.disabled && uiEnabled;
};
const getDynamicInputRef = ref => {
const { field, getInputRef = () => {} } = props;
setMyRef(ref);
if (isFocusable()) {
getInputRef(field.name, ref, resource);
}
};
const handleClick = () => {
if (myRef && typeof myRef.select === 'function') {
myRef.select();
}
};
const isInputFocusable = isFocusable();
let customError = null;
  // TODO refactor to separate function
if (validationErrors && validationErrors.length) {
const selectedError = validationErrors.filter(err => err.id === field.id);
if (selectedError && selectedError.length) {
customError = selectedError[0].message;
}
}
const inputProps = {
'data-test-field-name': field.name,
'data-test-max-value': field.maxValue,
'data-test-min-value': field.minValue,
'data-test-max-length': field.maxLength ? field.maxLength : 'dosent_matter',
'data-test-field-type': getTypeByField(field),
'data-test-field-hidden': field.hidden ? field.hidden : null,
field,
formData,
source,
resource,
label: !noLabel ? label || lodashGet(field, ['translatedCaption', locale], field.caption) : '',
// required: field.required, // validation will handle required
disabled: !isInputFocusable,
options: {
inputProps: {
disabled: !isInputFocusable,
},
inputRef: getDynamicInputRef,
onKeyDown: isInputFocusable ? handleKeyDown : undefined,
},
onChange: internalOnChange,
visibleClass: uiVisibled && !field.hidden ? '' : 'displayNone',
customError: customError,
viewVersion,
};
if (field.widthPercent) {
inputProps.style = { width: `${field.widthPercent}%` };
}
let inputComponent;
switch (getTypeByField(field)) {
default:
inputComponent = (
<TextInput
{...rest}
{...inputProps}
className={classNames(
classes.inputStyle,
inputInPuzzleForm
? classes.inputStylePuzzleForm
: noLabel
? classes.inputStyleNoLabel
: null,
)}
onClick={handleClick}
/>
);
}
return inputComponent;
};
DynamicInput.defaultProps = {
updateDefaultValue: () => {},
addToIgnoreFocusOnInit: () => {},
};
DynamicInput.propTypes = {
field: PropTypes.object.isRequired,
record: PropTypes.object.isRequired,
source: PropTypes.string.isRequired, // must be defined
triggerFormSubmit: PropTypes.func,
changeFormValue: PropTypes.func,
filterMode: PropTypes.bool,
disableDropdownQuickCreate: PropTypes.bool,
disableDropdownSearchPopup: PropTypes.bool,
noLabel: PropTypes.bool,
inputInPuzzleForm: PropTypes.bool,
defaultOperator: PropTypes.any,
onlyEqualCondition: PropTypes.any,
getRelationRef: PropTypes.func,
updateDefaultValue: PropTypes.func,
addToIgnoreFocusOnInit: PropTypes.func,
focusOnNextInput: PropTypes.func,
changeFocusOnAnotherRow: PropTypes.func,
relationResource: PropTypes.string,
label: PropTypes.string,
validationErrors: PropTypes.object,
initialValues: PropTypes.object,
clearValidationErrorForThisField: PropTypes.func,
additionalProps: PropTypes.object,
};
const mapStateToProps = (state, props) => {
const extraProps = {
globalParameters: lodashGet(state, 'profile.globalParameters', defaultGlobalParameters),
dropdownState: {},
viewVersion: state.admin.ui.viewVersion,
};
const { field, metaData } = props;
if (
    field.uiVisible &&
field.uiVisible.length &&
field.uiVisible[0].length === 3 &&
field.uiVisible[0][1] !== ''
) {
extraProps.dropdownState = getDropDownListFromState(field, 'uiVisible', state, metaData);
} else if (
    field.uiEnable &&
field.uiEnable.length &&
field.uiEnable[0].length === 3 &&
field.uiEnable[0][1] !== ''
) {
extraProps.dropdownState = getDropDownListFromState(field, 'uiEnable', state, metaData);
}
return extraProps;
};
const mapDispatchToProps = {};
export default connect(mapStateToProps, mapDispatchToProps)(DynamicInput);
| {
try {
const execute = new Function('formData', `${javaScriptUiVisible}`);
setUiVisibled(execute(clone(formData)));
} catch (error) {
console.log('javaScriptUiVisible error on %s', field.name, error);
}
} | conditional_block |
DynamicInput.js | /* eslint-disable @typescript-eslint/no-empty-function */
import React, { useState, useEffect, useCallback } from 'react';
import { connect } from 'react-redux';
import PropTypes from 'prop-types';
import lodashGet from 'lodash/get';
import lodashFind from 'lodash/find';
import lodashIsEqual from 'lodash/isEqual';
import classNames from 'classnames';
import lodashDebounce from 'lodash/debounce';
import { useLocale } from 'react-admin';
import { makeStyles } from '@material-ui/core/styles';
import TextInput from '../component/input/TextInput';
import {
isEnterPressed,
isCtrlEnterPressed,
isDownPressed,
isUpPressed,
} from '../helper/FormHelper';
import { getTypeByField, BOOLEAN_FIELD, DATE_FIELD } from '../helper/InputHelper';
import { isEmpty, isEmptyObject, clone } from '../helper/DataHelper';
import { getDropDownListFromState } from '../helper/MetaHelper';
const useStyles = makeStyles(theme => ({
inputStyle: {
width: '100%',
height: '100%',
backgroundColor: theme.palette.background.paper,
'& input': {
padding: 13,
lineHeight: 0,
},
'& label': {
transform: 'scale(1) translate(14px, 16px)',
fontSize: 13,
},
'& div': {
fontSize: 13,
height: '100%',
},
margin: 0,
[theme.breakpoints.down('md')]: {
'& label': {
transform: 'scale(1) translate(14px, 12px)',
},
},
[theme.breakpoints.down('sm')]: {
width: '100%',
margin: 0,
'& input': {
padding: 7,
lineHeight: 0,
},
'& label': {
transform: 'scale(1) translate(14px, 10px)',
fontSize: 10,
},
'& div': {
fontSize: 10,
},
},
},
longInputStyle: {
width: '100%',
height: '100%',
'& > div': {
padding: 13,
},
},
longTextInputStyleNoLabel: {
borderRadius: 4,
width: '100%',
height: '100%',
'& > div': {
padding: 10,
},
},
booleanStyle: {
height: '100%',
border: '1px solid rgba(0, 0, 0, 0.23)',
backgroundColor: theme.palette.background.paper,
borderRadius: theme.shape.borderRadius,
padding: '0 3px',
margin: 0,
width: '100%',
'& label': {
height: '100%',
margin: 0,
'& span': {
fontSize: 13,
},
},
'&[data-focus=false]:hover': {
border: `1px solid`,
},
'&[data-focus=true]': {
border: `1px solid ${theme.palette.primary.main}`,
boxShadow: `inset 0px 0px 0px 1px ${theme.palette.primary.main}`,
},
'&[data-blur=true]': {
boxShadow: 'none',
border: '1px solid rgba(0, 0, 0, 0.23)',
},
position: 'relative',
},
inputStyleNoLabel: {
borderRadius: 4,
width: '100%',
height: '100%',
'&[data-disabled-for-style=true]': {
backgroundColor: theme.palette.grey[300],
},
'& input': {
padding: 10,
lineHeight: 0,
},
},
inputStylePuzzleForm: {
margin: '7px 3px 0',
'&[data-disabled-for-style=true]': {
backgroundColor: theme.palette.grey[300],
},
},
}));
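// debounce wait in milliseconds for the keyboard-triggered form submit (see triggerSubmit below)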
const TRIGGER_SUBMIT_DEBOUNCE = 200;
const defaultGlobalParameters = {};
const DynamicInput = props => {
const [myRef, setMyRef] = useState(null);
const {
dispatch, // get it out
changeFormValue,
addToRelationArray,
field,
source,
resource,
filterMode,
disableDropdownQuickCreate,
disableDropdownSearchPopup,
defaultOperator,
onlyEqualCondition,
getInputRef, // get it out
triggerFormSubmit,
noLabel,
label,
focusOnNextInput,
updateDefaultValue, // get it out
addToIgnoreFocusOnInit, // get it out
getRelationRef, // it should be used by "RelationFormIterator"
recordPath,
inputInPuzzleForm,
inputInQuickCreateDialog,
globalParameters,
alwaysOn,
allowEmpty,
changeFocusOnAnotherRow,
relationResource,
relationSource,
relationInfo,
metaData,
relationPath,
formData,
dropdownState,
findDropdownData,
validationErrors,
initialValues,
viewVersion,
clearValidationErrorForThisField,
isCreateMode,
version,
updateEditedFormData,
additionalProps,
...rest
} = props;
const locale = useLocale();
const classes = useStyles();
const [uiVisibled, setUiVisibled] = useState(true);
const [uiEnabled, setUiEnabled] = useState(true);
const [isLoading, setIsLoading] = useState(false);
const [relatedDropDownValue, setRelatedDropDownValue] = useState({});
const [prevInitialValues, setPrevInitialValues] = useState(initialValues);
const [prevVersion, setPrevVersion] = useState(0);
useEffect(() => {
    // handle initial values
const { name } = field;
const computedVersion = version ? version : viewVersion;
const tabChangeCondition =
prevVersion !== computedVersion &&
prevInitialValues[name] === null &&
initialValues[name] === null;
if (inputInPuzzleForm || inputInQuickCreateDialog || (isCreateMode && computedVersion)) {
if (!lodashIsEqual(prevInitialValues, initialValues) || tabChangeCondition) {
changeFormValue(
name,
!isEmptyObject(initialValues) && !isEmpty(initialValues[name])
? initialValues[name]
: getTypeByField(field) === BOOLEAN_FIELD
? false
: null,
getTypeByField(field) === DATE_FIELD ? true : false,
);
setPrevInitialValues(initialValues);
}
setPrevVersion(computedVersion);
}
}, [initialValues, version]);
// component did mount
useEffect(() => {
const {
defaultValue,
name,
keepFocusAfterSubmit,
keepValueAfterSubmit,
globalParameter,
} = field;
if (!isEmpty(globalParameter)) {
updateDefaultValue(name, globalParameters[globalParameter.toLowerCase()]);
}
if (!isEmpty(defaultValue) || keepValueAfterSubmit) {
const isBoolean = getTypeByField(field) === BOOLEAN_FIELD;
      // because a boolean field's default value is '1' or '0' in the field object!
      // the variable below turns that value into true or false when the field is a BOOLEAN_FIELD.
let adoptedDefaultValue = defaultValue;
if (isBoolean && defaultValue == '0') {
adoptedDefaultValue = false;
} else if (isBoolean && defaultValue == '1') {
adoptedDefaultValue = true;
} else {
// if it wasn't a BOOLEAN_FIELD
adoptedDefaultValue = defaultValue;
}
updateDefaultValue(
name,
initialValues && (initialValues[name] || initialValues[name] === false)
? initialValues[name]
: adoptedDefaultValue,
);
}
if (!keepFocusAfterSubmit && typeof addToIgnoreFocusOnInit === 'function') {
addToIgnoreFocusOnInit(name);
}
}, []);
useEffect(() => {
if (field.uiEnable) getRelatedDropdownValue('uiEnable');
if (field.uiVisible) getRelatedDropdownValue('uiVisible');
}, [formData, dropdownState]);
useEffect(() => {
if (field.uiEnable || field.uiVisible) checkUi();
}, [formData, relatedDropDownValue, dropdownState]);
/**
   * check `field` and run `javaScriptUiVisible` / `javaScriptUiEnable` based on `formData` and `relatedDropDownValue`, then call `setUiVisibled` or `setUiEnabled`
* @returns {void}
*/
const checkUi = useCallback(() => {
const { uiVisible, uiEnable, javaScriptUiVisible, javaScriptUiEnable } = field;
if (uiVisible && uiVisible.length && javaScriptUiVisible) {
if (javaScriptUiVisible.indexOf('return') !== -1) {
if (field.uiVisible[0][1] !== '' && relatedDropDownValue) {
try {
const execute = new Function('relatedDropDownValue', `${javaScriptUiVisible}`);
setUiVisibled(execute(clone(relatedDropDownValue)));
} catch (error) {
console.log('javaScriptUiVisible error on %s', field.name, error);
}
} else if (field.uiVisible[0][1] === '' && formData && !isEmptyObject(formData)) {
try {
const execute = new Function('formData', `${javaScriptUiVisible}`);
setUiVisibled(execute(clone(formData)));
} catch (error) {
console.log('javaScriptUiVisible error on %s', field.name, error);
}
}
}
}
if (uiEnable && uiEnable.length && javaScriptUiEnable) {
if (javaScriptUiEnable.indexOf('return') !== -1) {
if (field.uiEnable[0][1] !== '' && relatedDropDownValue) {
try {
const execute = new Function('relatedDropDownValue', `${javaScriptUiEnable}`);
setUiEnabled(execute(clone(relatedDropDownValue)));
} catch (error) {
console.log('javaScriptUiEnable error on %s', field.name, error);
}
} else if (field.uiEnable[0][1] === '' && formData && !isEmptyObject(formData)) {
try {
const execute = new Function('formData', `${javaScriptUiEnable}`);
setUiEnabled(!!execute(clone(formData)));
} catch (error) {
console.log('javaScriptUiEnable error on %s', field.name, error);
}
}
}
}
}, [field, formData, relatedDropDownValue]);
/**
* check field and get dropdown data from redux state
* @param {String} uiCheckType
* @returns {void}
*/
const getRelatedDropdownValue = useCallback(
uiCheckType => {
if (!isLoading && dropdownState === undefined && isEmptyObject(relatedDropDownValue)) {
        // Fetch dropdown data
const fieldName = field[uiCheckType][0][0];
const dropdownMeta = lodashFind(metaData.fields, { name: fieldName }).dropdown;
setIsLoading(true);
} else {
        // Use dropdown data that is already available in the Redux state
setIsLoading(false);
const fieldName = field[uiCheckType][0][0];
const dropName = field[uiCheckType][0][1];
const selectedDropColId = formData[fieldName] !== null ? +formData[fieldName] : null;
if (selectedDropColId !== null) {
const dropdownSelectedCol = lodashFind(dropdownState, {
id: selectedDropColId,
});
if (dropdownSelectedCol) {
setRelatedDropDownValue({ [dropName]: dropdownSelectedCol[dropName] });
}
} else {
setRelatedDropDownValue({});
}
}
},
[dropdownState, formData],
);
const triggerSubmit = lodashDebounce(() => {
const { triggerFormSubmit } = props;
if (typeof triggerFormSubmit === 'function') {
triggerFormSubmit();
} else {
console.log('triggerFormSubmit is not defined, so can not trigger submit');
}
}, TRIGGER_SUBMIT_DEBOUNCE);
const triggerGoToNext = () => {
const { focusOnNextInput, field } = props;
if (typeof focusOnNextInput === 'function') {
focusOnNextInput(field.name);
}
};
const triggerChangeFocusOnAnotherRow = isMoveToNext => {
const { changeFocusOnAnotherRow, field } = props;
if (typeof changeFocusOnAnotherRow === 'function') {
changeFocusOnAnotherRow(field.name, isMoveToNext);
}
};
const handleKeyDown = event => {
// if form is submitted
if (isCtrlEnterPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerSubmit();
}
    // if the user wants to move focus to the next element
else if (isEnterPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerGoToNext();
} else if (isDownPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerChangeFocusOnAnotherRow(true);
} else if (isUpPressed(event)) {
event.preventDefault();
event.stopPropagation();
triggerChangeFocusOnAnotherRow(false);
}
};
/**
   * this function receives a new value and extracts the field name from props,
   * then handles the form change and updates the form value, the default value, and the edited form data.
* @function internalOnChange
* @param {string|number|object|Array} value
* @param {string|number|object|Array} previousValue
* @param {string} fieldName
* @returns {void}
*/
const internalOnChange = (value, previousValue, fieldName) => {
const { keepValueAfterSubmit, name } = field;
let tempValue = value;
// eslint-disable-next-line no-prototype-builtins
if (value && typeof value === 'object' && value.hasOwnProperty('nativeEvent')) {
tempValue = value.target.checked; // only BooleanInput
}
// if value should be kept when form is reset, keep value in default value
if (keepValueAfterSubmit) {
updateDefaultValue(name, tempValue);
}
if (typeof changeFormValue === 'function') {
changeFormValue(name, tempValue);
}
// for validation in relation
if (typeof clearValidationErrorForThisField === 'function') {
clearValidationErrorForThisField(name);
}
};
const isFocusable = () => {
const { field, disabled } = props;
return !disabled && !field.readOnly && !field.disabled && uiEnabled;
};
const getDynamicInputRef = ref => {
const { field, getInputRef = () => {} } = props;
setMyRef(ref);
if (isFocusable()) {
getInputRef(field.name, ref, resource);
}
};
const handleClick = () => {
if (myRef && typeof myRef.select === 'function') {
myRef.select();
}
};
const isInputFocusable = isFocusable();
let customError = null;
  // TODO: refactor into a separate function
if (validationErrors && validationErrors.length) {
const selectedError = validationErrors.filter(err => err.id === field.id);
if (selectedError && selectedError.length) {
customError = selectedError[0].message;
}
}
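  // props forwarded to the rendered input component; the data-test-* attributes are intended for UI tests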
const inputProps = {
'data-test-field-name': field.name,
'data-test-max-value': field.maxValue,
'data-test-min-value': field.minValue,
'data-test-max-length': field.maxLength ? field.maxLength : 'dosent_matter',
'data-test-field-type': getTypeByField(field),
'data-test-field-hidden': field.hidden ? field.hidden : null,
field,
formData,
source,
resource,
label: !noLabel ? label || lodashGet(field, ['translatedCaption', locale], field.caption) : '',
// required: field.required, // validation will handle required
disabled: !isInputFocusable,
options: {
inputProps: {
disabled: !isInputFocusable,
},
inputRef: getDynamicInputRef,
onKeyDown: isInputFocusable ? handleKeyDown : undefined,
},
onChange: internalOnChange,
visibleClass: uiVisibled && !field.hidden ? '' : 'displayNone',
customError: customError,
viewVersion,
};
if (field.widthPercent) {
inputProps.style = { width: `${field.widthPercent}%` };
}
let inputComponent;
switch (getTypeByField(field)) {
default:
inputComponent = (
<TextInput
{...rest}
{...inputProps}
className={classNames(
classes.inputStyle,
inputInPuzzleForm
? classes.inputStylePuzzleForm
: noLabel
? classes.inputStyleNoLabel
: null,
)}
onClick={handleClick}
/>
);
}
return inputComponent;
};
DynamicInput.defaultProps = {
updateDefaultValue: () => {},
addToIgnoreFocusOnInit: () => {},
};
DynamicInput.propTypes = {
field: PropTypes.object.isRequired,
record: PropTypes.object.isRequired,
source: PropTypes.string.isRequired, // must be defined
triggerFormSubmit: PropTypes.func,
changeFormValue: PropTypes.func,
filterMode: PropTypes.bool,
disableDropdownQuickCreate: PropTypes.bool,
disableDropdownSearchPopup: PropTypes.bool,
noLabel: PropTypes.bool,
inputInPuzzleForm: PropTypes.bool,
defaultOperator: PropTypes.any,
onlyEqualCondition: PropTypes.any,
getRelationRef: PropTypes.func,
updateDefaultValue: PropTypes.func,
addToIgnoreFocusOnInit: PropTypes.func,
focusOnNextInput: PropTypes.func,
changeFocusOnAnotherRow: PropTypes.func,
relationResource: PropTypes.string,
label: PropTypes.string,
validationErrors: PropTypes.array,
initialValues: PropTypes.object,
clearValidationErrorForThisField: PropTypes.func,
additionalProps: PropTypes.object,
};
const mapStateToProps = (state, props) => {
const extraProps = {
globalParameters: lodashGet(state, 'profile.globalParameters', defaultGlobalParameters),
dropdownState: {},
viewVersion: state.admin.ui.viewVersion,
};
const { field, metaData } = props;
if (
    field.uiVisible &&
field.uiVisible.length &&
field.uiVisible[0].length === 3 &&
field.uiVisible[0][1] !== ''
) {
extraProps.dropdownState = getDropDownListFromState(field, 'uiVisible', state, metaData);
} else if (
    field.uiEnable &&
field.uiEnable.length &&
field.uiEnable[0].length === 3 &&
field.uiEnable[0][1] !== ''
) { |
const mapDispatchToProps = {};
export default connect(mapStateToProps, mapDispatchToProps)(DynamicInput); | extraProps.dropdownState = getDropDownListFromState(field, 'uiEnable', state, metaData);
}
return extraProps;
}; | random_line_split |
client.rs | // #[macro_use]
extern crate actix;
// extern crate byteorder;
// extern crate bytes;
extern crate futures;
extern crate serde;
extern crate serde_json;
// extern crate tokio_io;
// extern crate tokio_tcp;
extern crate awc;
extern crate rustls;
extern crate structopt;
#[macro_use]
extern crate log;
extern crate env_logger;
// #[macro_use]
extern crate serde_derive;
use actix::{
// prelude::*, io::FramedWrite
io::{SinkWrite, WriteHandler},
prelude::*,
Actor,
ActorContext,
AsyncContext,
Context,
Handler,
StreamHandler,
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{
lazy,
/* future::ok, */ stream::{SplitSink, Stream},
Future, | // str::FromStr,
// time::Duration,
sync::Arc,
thread,
// net, process, thread,
};
// use tokio_io::{AsyncRead, io::WriteHalf};
// use tokio_tcp::TcpStream;
use awc::{
error::WsProtocolError,
http::StatusCode,
ws::{Codec, Frame, Message},
Client, Connector,
};
use rustls::ClientConfig;
use structopt::StructOpt;
// use webpki;
// use webpki_roots;
// mod codec;
// mod server;
// mod ws;
// mod util;
mod ws_var;
use ws_var::HEARTBEAT_INTERVAL;
#[derive(StructOpt, Debug, Clone)]
/// Generalized WebSocket Client
pub struct Opt {
/// Address to connect
#[structopt(short = "u", default_value = "https://localhost:443/ws")]
url: String,
    /// Message to send. Set it to '-' to read a single message from stdin,
    /// or leave it blank to use stdin as a console loop for sending multiple messages.
#[structopt(short = "m", default_value = "")]
msg: String,
}
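// Accepts any server certificate (handy for self-signed certificates in development).
// WARNING: this disables TLS certificate verification entirely; do not use it in production.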
mod danger {
use rustls::{
self, Certificate, RootCertStore, ServerCertVerified, ServerCertVerifier, TLSError,
};
use webpki;
pub struct NoCertificateVerification {}
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_roots: &RootCertStore,
_presented_certs: &[Certificate],
_dns_name: webpki::DNSNameRef<'_>,
_ocsp: &[u8],
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
}
}
fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let opt = Opt::from_args();
// let sys = System::new("ws-client");
System::run(move || {
let mut cfg = ClientConfig::new();
// let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// cfg.set_protocols(&protos);
cfg.dangerous()
.set_certificate_verifier(Arc::new(danger::NoCertificateVerification {}));
let client = Client::build()
.connector(Connector::new().rustls(Arc::new(cfg)).finish())
.finish();
// sys.block_on(
Arbiter::spawn(lazy(move || {
client
.ws(&opt.url)
.connect()
.map_err(|e| panic!("{}", e))
.map(move |(response, framed)| {
let sys = System::current();
if response.status() != StatusCode::SWITCHING_PROTOCOLS {
sys.stop();
}
let (sink, stream) = framed.split();
let addr = WsClient::create(|ctx| {
WsClient::add_stream(stream, ctx);
WsClient(SinkWrite::new(sink, ctx))
});
let read_stdin = || -> String {
let mut cmd = String::new();
if io::stdin().read_line(&mut cmd).is_err() {
println!("error");
}
cmd
};
if opt.msg.is_empty() {
// start console loop
thread::spawn(move || loop {
addr.do_send(ClientCommand(read_stdin()));
});
} else if opt.msg == "-" {
addr.do_send(ClientCommand(read_stdin()));
sys.stop();
} else {
addr.do_send(ClientCommand(opt.msg));
sys.stop();
}
})
}));
})
// ).unwrap();
// sys.block_on(
// ).unwrap();
// Arbiter::spawn(
// TcpStream::connect(&addr)
// .and_then(|stream| {
// let addr = WsClient::create(|ctx| {
// let (r, w) = stream.split();
// WsClient::add_stream(
// FramedRead::new(r, codec::ClientWsCodec),
// ctx,
// );
// WsClient {
// framed: FramedWrite::new(
// w,
// codec::ClientWsCodec,
// ctx,
// ),
// }
// });
// // start console loop
// thread::spawn(move || loop {
// let mut cmd = String::new();
// if io::stdin().read_line(&mut cmd).is_err() {
// println!("error");
// return;
// }
// addr.do_send(ClientCommand(cmd));
// });
// ok(())
// })
// .map_err(|e| {
// println!("Can not connect to server: {}", e);
// process::exit(1)
// }),
// );
// println!("Running ws client");
// sys.run()
}
// struct WsClient {
// framed: FramedWrite<WriteHalf<TcpStream>, codec::ClientWsCodec>,
// }
// #[derive(Message)]
// struct ClientCommand(String);
// impl Actor for WsClient {
// type Context = Context<Self>;
// fn started(&mut self, ctx: &mut Context<Self>) {
// // start heartbeats otherwise server will disconnect after 10 seconds
// self.hb(ctx)
// }
// fn stopped(&mut self, _: &mut Context<Self>) {
// println!("Disconnected");
// // Stop application on disconnect
// System::current().stop();
// }
// }
// impl WsClient {
// fn hb(&self, ctx: &mut Context<Self>) {
// ctx.run_later(Duration::new(, 0), |act, ctx| {
// act.framed.write(codec::WsRequest::Ping);
// act.hb(ctx);
// // client should also check for a timeout here, similar to the
// // server code
// });
// }
// }
// impl actix::io::WriteHandler<io::Error> for WsClient {}
// /// Handle stdin commands
// impl Handler<ClientCommand> for WsClient {
// type Result = ();
// fn handle(&mut self, msg: ClientCommand, _: &mut Context<Self>) {
// let m = msg.0.trim();
// if m.is_empty() {
// return;
// }
// // we check for /sss type of messages
// // if m.starts_with('/') {
// // let v: Vec<&str> = m.splitn(2, ' ').collect();
// // match v[0] {
// // "/list" => {
// // self.framed.write(codec::WsRequest::List);
// // }
// // "/join" => {
// // if v.len() == 2 {
// // self.framed.write(codec::WsRequest::Join(v[1].to_owned()));
// // } else {
// // println!("!!! room name is required");
// // }
// // }
// // _ => println!("!!! unknown command"),
// // }
// // } else {
// self.framed.write(codec::WsRequest::Message(m.to_owned()));
// // }
// }
// }
// /// Server communication
// impl StreamHandler<codec::WsResponse, io::Error> for WsClient {
// fn handle(&mut self, msg: codec::WsResponse, _: &mut Context<Self>) {
// match msg {
// codec::WsResponse::Message(ref msg) => {
// println!("message: {}", msg);
// }
// // codec::WsResponse::Joined(ref msg) => {
// // println!("!!! joined: {}", msg);
// // }
// // codec::WsResponse::Rooms(rooms) => {
// // println!("\n!!! Available rooms:");
// // for room in rooms {
// // println!("{}", room);
// // }
// // println!("");
// // }
// _ => (),
// }
// }
// }
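/// WebSocket client actor: owns the write half of the framed stream; incoming
/// frames are handled by the `StreamHandler` impl below.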
struct WsClient<T>(SinkWrite<SplitSink<Framed<T, Codec>>>)
where
T: AsyncRead + AsyncWrite;
#[derive(Message)]
struct ClientCommand(String);
impl<T: 'static> Actor for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// start heartbeats otherwise server will disconnect after 10 seconds
self.hb(ctx)
}
fn stopped(&mut self, _: &mut Context<Self>) {
info!("Disconnected");
// Stop application on disconnect
System::current().stop();
}
}
impl<T: 'static> WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn hb(&self, ctx: &mut Context<Self>) {
ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
act.0.write(Message::Ping(String::new())).unwrap();
act.hb(ctx);
// client should also check for a timeout here, similar to the
// server code
});
}
}
/// Handle stdin commands
impl<T: 'static> Handler<ClientCommand> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Result = ();
fn handle(&mut self, msg: ClientCommand, _ctx: &mut Context<Self>) {
self.0.write(Message::Text(msg.0)).unwrap();
}
}
/// Handle server websocket messages
impl<T: 'static> StreamHandler<Frame, WsProtocolError> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn handle(&mut self, msg: Frame, _ctx: &mut Context<Self>) {
match msg {
Frame::Text(txt) => println!("Server: {:?}", txt),
_ => (),
}
}
fn started(&mut self, _ctx: &mut Context<Self>) {
info!("Connected");
}
fn finished(&mut self, ctx: &mut Context<Self>) {
info!("Server disconnected");
ctx.stop()
}
}
impl<T: 'static> WriteHandler<WsProtocolError> for WsClient<T> where T: AsyncRead + AsyncWrite {} | };
use std::{
io, | random_line_split |
client.rs | // #[macro_use]
extern crate actix;
// extern crate byteorder;
// extern crate bytes;
extern crate futures;
extern crate serde;
extern crate serde_json;
// extern crate tokio_io;
// extern crate tokio_tcp;
extern crate awc;
extern crate rustls;
extern crate structopt;
#[macro_use]
extern crate log;
extern crate env_logger;
// #[macro_use]
extern crate serde_derive;
use actix::{
// prelude::*, io::FramedWrite
io::{SinkWrite, WriteHandler},
prelude::*,
Actor,
ActorContext,
AsyncContext,
Context,
Handler,
StreamHandler,
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{
lazy,
/* future::ok, */ stream::{SplitSink, Stream},
Future,
};
use std::{
io,
// str::FromStr,
// time::Duration,
sync::Arc,
thread,
// net, process, thread,
};
// use tokio_io::{AsyncRead, io::WriteHalf};
// use tokio_tcp::TcpStream;
use awc::{
error::WsProtocolError,
http::StatusCode,
ws::{Codec, Frame, Message},
Client, Connector,
};
use rustls::ClientConfig;
use structopt::StructOpt;
// use webpki;
// use webpki_roots;
// mod codec;
// mod server;
// mod ws;
// mod util;
mod ws_var;
use ws_var::HEARTBEAT_INTERVAL;
#[derive(StructOpt, Debug, Clone)]
/// Generalized WebSocket Client
pub struct Opt {
/// Address to connect
#[structopt(short = "u", default_value = "https://localhost:443/ws")]
url: String,
    /// Message to send. Set it to '-' to read a single message from stdin,
    /// or leave it blank to use stdin as a console loop for sending multiple messages.
#[structopt(short = "m", default_value = "")]
msg: String,
}
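// Accepts any server certificate (handy for self-signed certificates in development).
// WARNING: this disables TLS certificate verification entirely; do not use it in production.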
mod danger {
use rustls::{
self, Certificate, RootCertStore, ServerCertVerified, ServerCertVerifier, TLSError,
};
use webpki;
pub struct NoCertificateVerification {}
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_roots: &RootCertStore,
_presented_certs: &[Certificate],
_dns_name: webpki::DNSNameRef<'_>,
_ocsp: &[u8],
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
}
}
fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let opt = Opt::from_args();
// let sys = System::new("ws-client");
System::run(move || {
let mut cfg = ClientConfig::new();
// let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// cfg.set_protocols(&protos);
cfg.dangerous()
.set_certificate_verifier(Arc::new(danger::NoCertificateVerification {}));
let client = Client::build()
.connector(Connector::new().rustls(Arc::new(cfg)).finish())
.finish();
// sys.block_on(
Arbiter::spawn(lazy(move || {
client
.ws(&opt.url)
.connect()
.map_err(|e| panic!("{}", e))
.map(move |(response, framed)| {
let sys = System::current();
if response.status() != StatusCode::SWITCHING_PROTOCOLS {
sys.stop();
}
let (sink, stream) = framed.split();
let addr = WsClient::create(|ctx| {
WsClient::add_stream(stream, ctx);
WsClient(SinkWrite::new(sink, ctx))
});
let read_stdin = || -> String {
let mut cmd = String::new();
if io::stdin().read_line(&mut cmd).is_err() {
println!("error");
}
cmd
};
if opt.msg.is_empty() {
// start console loop
thread::spawn(move || loop {
addr.do_send(ClientCommand(read_stdin()));
});
} else if opt.msg == "-" {
addr.do_send(ClientCommand(read_stdin()));
sys.stop();
} else {
addr.do_send(ClientCommand(opt.msg));
sys.stop();
}
})
}));
})
// ).unwrap();
// sys.block_on(
// ).unwrap();
// Arbiter::spawn(
// TcpStream::connect(&addr)
// .and_then(|stream| {
// let addr = WsClient::create(|ctx| {
// let (r, w) = stream.split();
// WsClient::add_stream(
// FramedRead::new(r, codec::ClientWsCodec),
// ctx,
// );
// WsClient {
// framed: FramedWrite::new(
// w,
// codec::ClientWsCodec,
// ctx,
// ),
// }
// });
// // start console loop
// thread::spawn(move || loop {
// let mut cmd = String::new();
// if io::stdin().read_line(&mut cmd).is_err() {
// println!("error");
// return;
// }
// addr.do_send(ClientCommand(cmd));
// });
// ok(())
// })
// .map_err(|e| {
// println!("Can not connect to server: {}", e);
// process::exit(1)
// }),
// );
// println!("Running ws client");
// sys.run()
}
// struct WsClient {
// framed: FramedWrite<WriteHalf<TcpStream>, codec::ClientWsCodec>,
// }
// #[derive(Message)]
// struct ClientCommand(String);
// impl Actor for WsClient {
// type Context = Context<Self>;
// fn started(&mut self, ctx: &mut Context<Self>) {
// // start heartbeats otherwise server will disconnect after 10 seconds
// self.hb(ctx)
// }
// fn stopped(&mut self, _: &mut Context<Self>) {
// println!("Disconnected");
// // Stop application on disconnect
// System::current().stop();
// }
// }
// impl WsClient {
// fn hb(&self, ctx: &mut Context<Self>) {
// ctx.run_later(Duration::new(, 0), |act, ctx| {
// act.framed.write(codec::WsRequest::Ping);
// act.hb(ctx);
// // client should also check for a timeout here, similar to the
// // server code
// });
// }
// }
// impl actix::io::WriteHandler<io::Error> for WsClient {}
// /// Handle stdin commands
// impl Handler<ClientCommand> for WsClient {
// type Result = ();
// fn handle(&mut self, msg: ClientCommand, _: &mut Context<Self>) {
// let m = msg.0.trim();
// if m.is_empty() {
// return;
// }
// // we check for /sss type of messages
// // if m.starts_with('/') {
// // let v: Vec<&str> = m.splitn(2, ' ').collect();
// // match v[0] {
// // "/list" => {
// // self.framed.write(codec::WsRequest::List);
// // }
// // "/join" => {
// // if v.len() == 2 {
// // self.framed.write(codec::WsRequest::Join(v[1].to_owned()));
// // } else {
// // println!("!!! room name is required");
// // }
// // }
// // _ => println!("!!! unknown command"),
// // }
// // } else {
// self.framed.write(codec::WsRequest::Message(m.to_owned()));
// // }
// }
// }
// /// Server communication
// impl StreamHandler<codec::WsResponse, io::Error> for WsClient {
// fn handle(&mut self, msg: codec::WsResponse, _: &mut Context<Self>) {
// match msg {
// codec::WsResponse::Message(ref msg) => {
// println!("message: {}", msg);
// }
// // codec::WsResponse::Joined(ref msg) => {
// // println!("!!! joined: {}", msg);
// // }
// // codec::WsResponse::Rooms(rooms) => {
// // println!("\n!!! Available rooms:");
// // for room in rooms {
// // println!("{}", room);
// // }
// // println!("");
// // }
// _ => (),
// }
// }
// }
struct | <T>(SinkWrite<SplitSink<Framed<T, Codec>>>)
where
T: AsyncRead + AsyncWrite;
#[derive(Message)]
struct ClientCommand(String);
impl<T: 'static> Actor for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// start heartbeats otherwise server will disconnect after 10 seconds
self.hb(ctx)
}
fn stopped(&mut self, _: &mut Context<Self>) {
info!("Disconnected");
// Stop application on disconnect
System::current().stop();
}
}
impl<T: 'static> WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn hb(&self, ctx: &mut Context<Self>) {
ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
act.0.write(Message::Ping(String::new())).unwrap();
act.hb(ctx);
// client should also check for a timeout here, similar to the
// server code
});
}
}
/// Handle stdin commands
impl<T: 'static> Handler<ClientCommand> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Result = ();
fn handle(&mut self, msg: ClientCommand, _ctx: &mut Context<Self>) {
self.0.write(Message::Text(msg.0)).unwrap();
}
}
/// Handle server websocket messages
impl<T: 'static> StreamHandler<Frame, WsProtocolError> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn handle(&mut self, msg: Frame, _ctx: &mut Context<Self>) {
match msg {
Frame::Text(txt) => println!("Server: {:?}", txt),
_ => (),
}
}
fn started(&mut self, _ctx: &mut Context<Self>) {
info!("Connected");
}
fn finished(&mut self, ctx: &mut Context<Self>) {
info!("Server disconnected");
ctx.stop()
}
}
impl<T: 'static> WriteHandler<WsProtocolError> for WsClient<T> where T: AsyncRead + AsyncWrite {}
| WsClient | identifier_name |