Columns: repo_name, path, text
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_6_Spark_Extra.scala
package org.pengfei.Lesson05_Spark_ML import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.{Row, SparkSession} import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType} import org.apache.spark.sql.functions._ object Lesson05_6_Spark_Extra { def main(args:Array[String])={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder().master("local[2]").appName("Lesson5_6_Spark_Extra").getOrCreate() /**********************************5.6.1 Dealing with null in Spark *****************************************/ SparkNullExample(spark) } /********************************************************************************************* * ******************************5.6.1 Dealing with null in Spark **************************** * *******************************************************************************************/ /* Datasets are usally filled with many null values and you’ll constantly need to write code that * gracefully handles these null values. You’ll need to use null values correctly in spark, adhere * to Scala best practices regarding null values, and possibly break Scala best practices for performance * sensitive code. * * In this section, we will explain how to work with null in Spark * * What is null? * * In SQL databases, "null" means that some value is unknown, missing, or irrelevant." The SQL concept of null is * different than null in programming languages like scala. Spark dataframe best practices are aligned with sql best * practices, so Spark should use null for values that are unknow, missing or irrelevant. */ /*******************************************Null vs NaN ***************************************/ /* null values represents "no value" or "nothing", it's not even an empty string or zero. It can be used to * represent that nothing useful exists. * * NaN stands for "Not a Number", it's usually the result of a mathematical operation that doesn't * make sense, e.g. 0.0/0.0.*/ /****************************** Spark support null and NaN value **********************************/ /* name,country,zip_code joe,usa,89013 ravi,india, "",,12389 "",, NaN,,NaN * * When spark read the above csv file into a dataframe, all the blank values and empty strings are read into a null * by the spark sql lib (since spark 2.0.1) * * In spark dataset/frame, all null value of data source is consider unknown or missing. * See the below example*/ def SparkNullExample(spark:SparkSession):Unit={ import spark.implicits._ val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/sample_null_value.csv" val df=spark.read.option("header","true").option("inferSchema","true").csv(filePath) df.show() /* Spark function may also return null value, for example, we try to calculate the mean and stddev of a * string column, it returns null, because the return value is irrelevant.*/ val letterDf= Seq(("a"),("b")).toDF("letter") letterDf.describe().show() /* Other example, when joining DataFrames, the join column will return null when a match cannot be made. 
*/ /**************************************Get null/na count***********************************/ /* we can use isNull or isNan, isNull can only detect the null/empty value, isNaN can detect only NaN */ val nullInName=df.filter($"name".isNull).count() val nullInCountry=df.filter($"country".isNull).count() val nullInCode=df.filter($"zip_code".isNull).count() println(s"null value number in Name $nullInName, in country $nullInCountry, in zip_code $nullInCode") val nanInName=df.filter($"name".isNaN).count() val nanInCountry=df.filter($"country".isNaN).count() val nanInCode=df.filter($"zip_code".isNaN).count() println(s"nan value number in Name $nanInName, in country $nanInCountry, in zip_code $nanInCode") val totalInName=df.filter($"name".isNull||$"name".isNaN).count() val totalInCountry=df.filter($"country".isNull||$"country".isNaN).count() val totalInCode=df.filter($"zip_code".isNull||$"zip_code".isNaN).count() println(s"Total null value number in Name $totalInName, in country $totalInCountry, in zip_code $totalInCode") /**********************************Scala null Conventions********************************/ /* <NAME>, the author of Beginning Scala, stated “Ban null from any of your code. Period.” * <NAME>, a prominent Scala blogger and author, explains why Option is better than null in * this blog post. The Scala community clearly perfers Option to avoid the pesky null pointer exceptions * that have burned them in Java. * * But spark sql is more sql than scala. And for performance sensitive code, null is better than option, * in order to avoid virtual method calls and boxing.*/ /*************************************nullable Columns*************************************/ /*When we define a dataset/frame schema, we could specify the value of a column is nullable or not. For * example, name column is not nullable, and age column is nullable. 
In other words, the name column * cannot take null values, but the age column can take null values.*/ val schema= List( StructField("name",StringType,false), StructField("age",IntegerType,true) ) val data=Seq( Row("miguel",null), Row("luisa",21) ) val df1=spark.createDataFrame(spark.sparkContext.parallelize(data),StructType(schema)) df1.show() /* If we set one of the name rows to be null, the code will blow up with this error: * “Error while encoding: java.lang.RuntimeException: The 0th field ‘name’ of input row * cannot be null”.*/ /*******************************Transform column which contains null values***********************/ val numRDD=spark.sparkContext.parallelize(List( Row(1), Row(8), Row(12), Row(null) )) // println(numRDD.toDebugString) // println(numRDD.collect().toArray.mkString(";")) val numSchema= List( StructField("number",IntegerType,true) ) val numDf= spark.createDataFrame(numRDD,StructType(numSchema)) numDf.show() /* Now we want to add a new column which tells if the number is even or not, first we use function isEvenSimple * which does not consider the num value is null*/ //val isEvenSimpleUdf=udf[Boolean,Integer](isEvenSimple) spark.udf.register("isEvenSimple",(number:Int)=>isEvenSimple(number)) spark.udf.register("isEvenBad",(num:Int)=>isEvenBad(num)) spark.udf.register("isEvenScala",(num:Int)=>isEvenScala(num)) val evenSimpleDf=numDf.withColumn("is_even",expr("isEvenSimple(number)")) evenSimpleDf.show() // here we use a when(condition,value) method, we only call isEvenSimple function when the number is not null // ifnot we just fill null value val evenGoodDf=numDf.withColumn("is_even_Good",when(col("number").isNotNull,expr("isEvenSimple(number)")).otherwise(lit(null))) evenGoodDf.show() val evenBadDf=numDf.withColumn("is_even_bad",expr("isEvenBad(number)")) evenBadDf.show() val evenRBadDf=numDf.withColumn("is_even_Rbad",when(col("number").isNull,false).otherwise(expr("isEvenBad(number)"))) evenRBadDf.show() val evenScalaDf=numDf.withColumn("is_even_Scala",expr("isEvenScala(number)")) evenScalaDf.show() // With all the above test, finally spark manage well null value transformation, so it's enough to just // use the isEvenSimple function /***************************************fill null/nan with other value************************************/ /*To deal with null or nan, there is only two ways: * - drop the row with null/nan value * - fill the null cell with a value, In general it's the mean of the column, * In spark we can use .na.drop or na.fill to do these tasks * na.drop will drop any rows which contains null or nan * na.fill(arg) respect the type of the column, for example na.fill(0) will repalce all column of type int * na.fill("Na") will replace all column with type string*/ val afterDropDf=df.na.drop() afterDropDf.show() val afterFillIntDf=df.na.fill(0) afterFillIntDf.show() val afterFillStrDf=df.na.fill("NA") afterFillStrDf.show() /* *You can notice that the name column still has NaN, because only in columns of digit type NaN means "Not a Number". *In string column NaN is considered as a normal value. */ } def isEvenSimple(num:Int):Boolean={ num%2 ==0 } // this function treat null badly, it may confuse data engineer with a false value in the is_even column and the number // is null. def isEvenBad(num:Int):Boolean={ // we can't compare int with null. 
so we can never enter the if branch; it always falls through to the else. if(num==null){ false }else{ num%2==0 } } /* The best way to deal with null in Scala is to use the Option type. */ def isEvenScala(num:Int):Option[Boolean]={ if(num==null){ None } else{ Some(num%2==0) } } }
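As a complement to the na.drop()/na.fill() calls above, the minimal sketch below (not part of the original lesson; the object name, column names and sample rows are assumptions) shows the per-column variants: na.drop(Seq(...)) drops a row only when the listed columns are null, and na.fill(Map(...)) fills each column with its own replacement value.

import org.apache.spark.sql.SparkSession

object NullHandlingPerColumnSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("NullHandlingPerColumnSketch").getOrCreate()
    import spark.implicits._

    // Hypothetical data with the same shape as the lesson's CSV sample; None becomes null.
    val df = Seq(
      (Some("joe"), Some("usa"), Some(89013)),
      (Some("ravi"), Some("india"), None),
      (None, None, Some(12389))
    ).toDF("name", "country", "zip_code")

    // Drop rows only when zip_code is null, keeping rows whose name is null.
    df.na.drop(Seq("zip_code")).show()

    // Fill a different replacement value per column in a single call.
    df.na.fill(Map("name" -> "unknown", "country" -> "unknown", "zip_code" -> 0)).show()

    spark.stop()
  }
}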
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/basics/FileToRdd.scala
package org.pengfei.spark.basics import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.SparkSession import org.apache.spark.sql.functions.typedLit object FileToRdd { def main(args:Array[String])={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder(). master("local[2]"). //spark://10.70.3.48:7077 remote appName("CalHousing"). getOrCreate() // import sparkSession.implicits._ for all schema conversion magic. import spark.implicits._ val file1="/DATA/data_set/spark/basics/Lesson1_RDD/file1.txt" val file2="/DATA/data_set/spark/basics/Lesson1_RDD/file2.txt" // in spark 2.2. spark session read will return dataset. here we don't provide schema, // so it returns a dataset with one column (value) as type String val data1=spark.read.text(file1).as[String] val data2=spark.read.text(file2).as[String] data1.show() //This step transform string "1,9,5" to a string list ["1","9","5"] val values1=data1.map(value => value.split(",")) //This step transform column value ["1","9","5"] to three column val df1=values1.select($"value".getItem(0).as("col1"),$"value".getItem(1).as("col1"),$"value".getItem(2).as("col2")) // val df1=values1.select($"value".getItem(0).as("col1"),typedLit(Seq($"value".getItem(1).as[String],$"value".getItem(2).as[String])).as("col2")) df1.show() val rdd1= df1.rdd print(rdd1) } }
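The same "one text line to three columns" transformation can also be written with the built-in split() SQL function instead of mapping over the Dataset. The sketch below assumes a hypothetical file path and that each line looks like "1,9,5"; it also gives the three output columns distinct names.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.split

object FileToColumnsSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("FileToColumnsSketch").getOrCreate()
    import spark.implicits._

    // Hypothetical path; each line is expected to look like "1,9,5".
    val lines = spark.read.text("/tmp/file1.txt").as[String]

    // split() produces an array column; getItem() pulls out the individual elements.
    val parts = split($"value", ",")
    val df = lines.select(
      parts.getItem(0).as("col1"),
      parts.getItem(1).as("col2"),
      parts.getItem(2).as("col3"))

    df.show()
    spark.stop()
  }
}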
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson01_RDD/test.scala
package org.pengfei.Lesson01_RDD object test { def main(args:Array[String]):Unit={ val fileName="pliu.txt" val badfn="pliu.txt.bkp" println(getExtention(fileName)) println(getExtention(badfn)) } def getExtention(fileName:String):String={ val index=fileName.lastIndexOf(".") if(index>0)return fileName.substring(index+1) else return "None" } } /* * * * * * import org.apache.spark.sql.types._ val inputFile = "/test_data/bioaster-2018-03-06.fist" val schema = StructType(Array( StructField("Blocks", LongType, true), StructField("Perms", LongType, true), StructField("nlinks", IntegerType, true), StructField("Uid", LongType, true), StructField("Gid", LongType, true), StructField("Size", LongType, true), StructField("Mtime", LongType, true), StructField("Atime", LongType, true), StructField("Ctime", LongType, true), StructField("Name", StringType, true))) val df = spark.read.format("com.databricks.spark.csv").option("delimiter", ":").schema(schema).load(inputFile) df.show(5) * * * * * import java.time.{Instant,ZoneId,ZonedDateTime} def getSize(rawSize:Long): String ={ val unit:Array[String]=Array("B","KB","MB","GB","TB") var index=0 var tmpSize:Long=rawSize while(tmpSize>=1024){ tmpSize=tmpSize/1024 index+=1 } return tmpSize+unit(index) } def getFileName(fullPath:String):String={ val fileName=fullPath.substring(fullPath.lastIndexOf("/")+1) return fileName } def getParentDir(fullPath:String):String={ val parentDir=fullPath.substring(0,fullPath.lastIndexOf("/")) return parentDir } def getDateInMillis(date:String):Long={ val format=new java.text.SimpleDateFormat("m/dd/yyyy") val time=format.parse(date).getTime()/1000 return time } def getDate(rawDate:Long):String={ val timeInMillis = System.currentTimeMillis() val instant = Instant.ofEpochSecond(rawDate) val zonedDateTimeUtc= ZonedDateTime.ofInstant(instant,ZoneId.of("UTC")) val zonedDateTimeCet=ZonedDateTime.ofInstant(instant,ZoneId.of("CET")) zonedDateTimeUtc.toString } def getExtention(fileName:String):String={ val index=fileName.lastIndexOf(".") if(index>0)return fileName.substring(index+1) else return "None" } spark.udf.register("getDate", (arg: Long)=>getDate(arg)) spark.udf.register("getFileName", (arg:String)=>getFileName(arg)) spark.udf.register("getParentDir",(arg:String)=>getParentDir(arg)) spark.udf.register("getExtention",(arg:String)=>getExtention(arg)) spark.udf.register("getSize",(arg:Long)=>getSize(arg)) * * * * * * val result = df.withColumn("DataType", expr("substring(Perms, 1, length(Perms)-4)")).withColumn("ACL", expr("substring(Perms, length(Perms)-3,length(Perms))")).drop("Perms") .withColumn("FileName",expr("getFileName(Name)")) .withColumn("ParentDir",expr("getParentDir(Name)")).drop("Mtime").drop("Ctime") .withColumn("Extention",expr("getExtention(Name)")) result.show(5) //result.count() * * * //Total fastq file count and size val allFastqs=result.filter($"FileName".endsWith(".fastq")) allFastqs.count() val totalSize=allFastqs.agg(sum("Size")).first.get(0) val HSize=getSize(totalSize.asInstanceOf[Number].longValue) * * * val fastqCountByDir=allFastqs.groupBy($"ParentDir").count().orderBy($"count".desc) //fastqCountByDir.count() //fastqCountByDir.show(5) val fastqSizeByDir=allFastqs.groupBy($"ParentDir").agg(expr("sum(Size) as TotalSize")).orderBy($"TotalSize".desc) .withColumn("HTotalSize",expr("getSize(TotalSize)")) fastqSizeByDir.show(10) val fileTypeSortByNum=result.groupBy($"Extention").count().orderBy($"count".desc) //fileTypeSortByNum.show(10) val fileTypeSortBySize=result.groupBy($"Extention").agg(expr("sum(Size) as 
TotalSize")).orderBy($"TotalSize".desc) .withColumn("HTotalSize",expr("getSize(TotalSize)")) fileTypeSortBySize.show(10) val duplicatedFile=result.groupBy($"FileName").count().filter($"count">1).orderBy($"count".asc) duplicatedFile.show(20) * */
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/ml/feature/extraction/KeyWordExtraction.scala
package org.pengfei.spark.ml.feature.extraction import org.apache.log4j.{Level, Logger} import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer} import org.apache.spark.sql.SparkSession /* * In the following code segment, we start with a set of sentences. * We split each sentence into words using Tokenizer. * For each sentence (bag of words), we use HashingTF to hash * the sentence into a feature vector. We use IDF to rescale * the feature vectors; this generally improves performance * when using text as features. Our feature vectors could then * be passed to a learning algorithm. * */ /* * The whole example are copied from https://spark.apache.org/docs/latest/ml-features.html#tf-idf * */ object KeyWordExtraction { def main(args:Array[String])={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder(). master("local"). appName("KeyWordExtraction"). getOrCreate() val sentenceData=spark.createDataFrame(Seq( (0.0, "Hi I heard about Spark"), (0.0, "I wish Java could use case classes"), (1.0, "Logistic regression models are neat") )).toDF("label","sentence") val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words") val wordsData = tokenizer.transform(sentenceData) val hashingTF=new HashingTF() .setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(2000) val featurizedData = hashingTF.transform(wordsData) featurizedData.show(10) val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features") val idfModel = idf.fit(featurizedData) val rescaledData = idfModel.transform(featurizedData) rescaledData.select("label", "features").show() } }
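The three feature transformers above can also be chained into a single ML Pipeline, so fit() and transform() are called once for the whole sequence. The sketch below is a minimal rearrangement of the same example (the object name is assumed), not a different algorithm.

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.sql.SparkSession

object TfIdfPipelineSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("TfIdfPipelineSketch").getOrCreate()

    val sentenceData = spark.createDataFrame(Seq(
      (0.0, "Hi I heard about Spark"),
      (0.0, "I wish Java could use case classes"),
      (1.0, "Logistic regression models are neat")
    )).toDF("label", "sentence")

    val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(2000)
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")

    // fit() trains the IDF model on the hashed term frequencies; transform() applies all stages.
    val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, idf))
    val model = pipeline.fit(sentenceData)
    model.transform(sentenceData).select("label", "features").show(truncate = false)

    spark.stop()
  }
}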
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/ml/data/preparation/NullValueEliminator.scala
<reponame>pengfei99/Spark package org.pengfei.spark.ml.data.preparation import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.{DataFrame, SparkSession} class NullValueEliminator { def main(args:Array[String]): Unit ={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder(). master("local[2]"). //spark://10.70.3.48:7077 remote appName("CalHousing"). getOrCreate() //spark.conf.set("") import spark.implicits._ case class Company(cName: String, cId: String, details: String) case class Employee(name: String, id: String, email: String, company: Company) val e1 = Employee("n1", null, "<EMAIL>", Company("c1", "1", "d1")) val e2 = Employee("n2", "2", "<EMAIL>", Company("c1", "1", "d1")) val e3 = Employee("n3", "3", "<EMAIL>", Company("c1", "1", "d1")) val e4 = Employee("n4", "4", "<EMAIL>", Company("c2", "2", "d2")) val e5 = Employee("n5", null, "<EMAIL>", Company("c2", "2", "d2")) val e6 = Employee("n6", "6", "<EMAIL>", Company("c2", "2", "d2")) val e7 = Employee("n7", "7", "<EMAIL>", Company("c3", "3", "d3")) val e8 = Employee("n8", "8", "<EMAIL>", Company("c3", "3", "d3")) val employees = Seq(e1, e2, e3, e4, e5, e6, e7, e8) print(employees.getClass().getName()) val sc=spark.sparkContext val rdd=sc.parallelize(employees) print(rdd.getClass().getName()) //val df=spark.createDataFrame(rdd) //val df=sc.parallelize(employees).toDF //df.show() //df.filter("id is null").show() // df.withColumn("id", when($"id".isNull, 0).otherwise(1)).show } def removeNullValueOfFeatureColumns(dataFrame:DataFrame,columnNames:Array[String]):DataFrame={ var result:DataFrame=dataFrame for(columnName <- columnNames){ result=result.filter(result(columnName).isNotNull) } return result } }
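removeNullValueOfFeatureColumns above filters column by column; the sketch below (hypothetical object, column names and data) shows that a single na.drop restricted to the chosen columns drops the same rows, i.e. any row where at least one of the listed columns is null.

import org.apache.spark.sql.{DataFrame, SparkSession}

object NullEliminatorSketch {
  // Drop a row if ANY of the listed columns is null, matching the loop of isNotNull filters.
  def dropNullRows(df: DataFrame, columnNames: Array[String]): DataFrame =
    df.na.drop(columnNames.toSeq)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("NullEliminatorSketch").getOrCreate()
    import spark.implicits._

    val employees = Seq(
      ("n1", null.asInstanceOf[String], "e1@example.com"),
      ("n2", "2", "e2@example.com")
    ).toDF("name", "id", "email")

    dropNullRows(employees, Array("id")).show()
    spark.stop()
  }
}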
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/basics/MultiDimentionalAgg.scala
<reponame>pengfei99/Spark package org.pengfei.spark.basics import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.SparkSession import org.apache.spark.sql.functions._ object MultiDimentionalAgg { def main(args: Array[String])={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder(). master("local[2]"). //spark://10.70.3.48:7077 remote appName("USAFlight"). getOrCreate() //spark.conf.set("") import spark.implicits._ val sales = Seq( ("Warsaw", 2016, 100), ("Warsaw", 2017, 200), ("Boston", 2015, 50), ("Boston", 2016, 150), ("Toronto", 2017, 50) ).toDF("city", "year", "amount") sales.show() val groupByCityAndYear = sales.groupBy("city","year").agg(sum("amount") as "amount") groupByCityAndYear.show() val total_amount=sales.agg(sum("amount").cast("long")).first.getLong(0) print(total_amount) } }
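The example above only uses a plain groupBy; since the file is named MultiDimentionalAgg, the sketch below (same sales data, object name assumed, not part of the original file) adds rollup and cube, which produce the sub-total and grand-total rows that groupBy alone does not.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.sum

object MultiDimAggSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("MultiDimAggSketch").getOrCreate()
    import spark.implicits._

    val sales = Seq(
      ("Warsaw", 2016, 100),
      ("Warsaw", 2017, 200),
      ("Boston", 2015, 50),
      ("Boston", 2016, 150),
      ("Toronto", 2017, 50)
    ).toDF("city", "year", "amount")

    // rollup(city, year): totals per (city, year), per city, plus a grand total (null, null).
    sales.rollup("city", "year").agg(sum("amount").as("amount")).orderBy($"city", $"year").show()

    // cube(city, year): additionally produces per-year totals across all cities.
    sales.cube("city", "year").agg(sum("amount").as("amount")).orderBy($"city", $"year").show()

    spark.stop()
  }
}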
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson13_Anomaly_Detection/Lesson13_Anomaly_Detection.scala
<reponame>pengfei99/Spark<gh_stars>0 package org.pengfei.Lesson13_Anomaly_Detection import com.typesafe.config.ConfigFactory import org.apache.log4j.{Level, Logger} import org.apache.spark.ml.{Pipeline, PipelineModel} import org.apache.spark.ml.clustering.{KMeans, KMeansModel} import org.apache.spark.ml.feature._ import org.apache.spark.sql.{DataFrame, SparkSession} import org.apache.spark.sql.functions._ import org.apache.spark.ml.linalg.{Vector, Vectors} import scala.util.Random /* * The source code of Advanced Analytics with Spark can be found https://github.com/sryza/aas * */ object Lesson13_Anomaly_Detection { /**************************************************************************************************************** * ***************************************13.1 Introduction ************************************************** * ***********************************************************************************************************/ /* In this lesson, we will use a unsupervised learning technique (e.g. clustering), Clustering can identify * groups inside a data set. Consider the problem of dividing up an ecommerce site’s customers by their shopping * habits and tastes. The input features are their purchases, clicks, demographic information, and more. * The output should be groupings of customers: perhaps one group will represent fashion-conscious buyers, * another will turn out to correspond to pricesensitive bargain hunters, and so on. * * If you were asked to determine this target label for each new customer, you would quickly run into a problem * in applying a supervised learning technique like a classifier: you don’t know a priori who should be considered * fashion-conscious, for example. In fact, you’re not even sure if “fashion-conscious” is a meaningful grouping of * the site’s customers to begin with! * * Fortunately, unsupervised learning techniques can help. These techniques do not learn to predict a target * value, because none is available. They can, however, learn structure in data and find groupings of similar * inputs, or learn what types of input are likely to occur and what types are not. * */ /****************************************************************************************************************** * ***************************************13.2 Anomaly Detection ************************************************ * **************************************************************************************************************/ /* The inherent problem of anomaly detection is, as its name implies, that of finding unusual things. If we already * knew what “anomalous” meant for a data set, we could easily detect anomalies in the data with supervised learning. * An algorithm would receive inputs labeled “normal” and “anomaly”, and learn to distinguish the two. * * However, the nature of anomalies is that they are unknown unknowns. Put another way, an anomaly that has been * observed and understood is no longer an anomaly. Anomaly detection is often used to find fraud, detect network * attacks, or discover problems in servers or other sensor-equipped machinery. In these cases, it’s important * to be able to find new types of anomalies that have never been seen before—new forms of fraud, intrusions, * and failure modes for servers. * * Unsupervised learning techniques are useful in these cases because they can learn what input data normally * looks like, and therefore detect when new data is unlike past data. 
Such new data is not necessarily attacks * or fraud; it is simply unusual, and therefore, worth further investigation. * */ /****************************************************************************************************************** * **************************************13.3 K-means clustering ************************************************* * ***************************************************************************************************************/ /* Clustering is the best-known type of unsupervised learning. Clustering algorithms try to find natural groupings * in data. Data points that are like one another but unlike others are likely to represent a meaningful grouping, * so clustering algorithms try to put such data into the same cluster. * * K-means clustering may be the most widely used clustering algorithm. It attempts to detect k clusters in a data * set, where k is given by the data scientist. k is a hyperparameter of the model, and the right value will depend * on the data set. In fact, choosing a good value for k will be a central plot point in this chapter. * * What does “like” mean when the data set contains information like customer activity? Or transactions? K-means * requires a notion of distance between data points. It is common to use simple Euclidean distance to measure * distance between data points with K-means, and as it happens, this is the only distance function supported by * Spark MLlib as of this writing. The Euclidean distance is defined for data points whose features are all numeric. * “Like” points are those whose intervening distance is small. * * To K-means, a cluster is simply a point: the center of all the points that make up the cluster. These are, * in fact, just feature vectors containing all numeric features, and can be called vectors. However, it may be * more intuitive to think of them as points here, because they are treated as points in a Euclidean space. * * This center is called the cluster centroid, and is the arithmetic mean of the points—hence the name K-means. * To start, the algorithm picks some data points as the initial cluster centroids. Then each data point is assigned * to the nearest centroid. Then for each cluster, a new cluster centroid is computed as the mean of the data points * just assigned to that cluster. This process is repeated. * */ /****************************************************************************************************************** * *********************************** 13.4 Network intrusion **************************************************** * **************************************************************************************************************/ /* So-called cyberattacks are increasingly visible in the news. Some attacks attempt to flood a computer with * network traffic to crowd out legitimate traffic. But in other cases, attacks attempt to exploit flaws in * networking software to gain unauthorized access to a computer. While it’s quite obvious when a computer * is being bombarded with traffic, detecting an exploit can be like searching for a needle in an incredibly * large haystack of network requests. * * Some exploit behaviors follow known patterns. For example, accessing every port on a machine in rapid succession * is not something any normal software program should ever need to do. However, it is a typical first step for an * attacker looking for services running on the computer that may be exploitable. 
If you were to count the number * of distinct ports accessed by a remote host in a short time, you would have a feature that probably predicts * a port-scanning attack quite well. A handful is probably normal; hundreds indicates an attack. The same goes for * detecting other types of attacks from other features of network connections—number of bytes sent and received, * TCP errors, and so forth. But what about those unknown unknowns? The biggest threat may be the one that has * never yet been detected and classified. Part of detecting potential network intrusions is detecting anomalies. * These are connections that aren’t known to be attacks but do not resemble connections that have been observed * in the past. * * Here, unsupervised learning techniques like K-means can be used to detect anomalous network connections. * K-means can cluster connections based on statistics about each of them. The resulting clusters themselves * aren’t interesting per se, but they collectively define types of connections that are like past connections. * Anything not close to a cluster could be anomalous. Clusters are interesting insofar as they define regions * of normal connections; everything else outside is unusual and potentially anomalous. * */ /**************************************************************************************************************** * ****************************************** 13.5 Data Set ************************************************ * *************************************************************************************************************/ /* The data set which we use in this Lesson is from KDD Cup (http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html) * * Don’t use this data set to build a real network intrusion system! The data did not necessarily reflect real * network traffic at the time—even if it did, it reflects traffic patterns from 17 years ago. * * Fortunately, the organizers had already processed raw network packet data into summary information about * individual network connections. The data set is about 708 MB in size and contains about 4.9 million connections. * This is large, if not massive,and is certainly sufficient for our purposes here. For each connection, the data set * contains information like the number of bytes sent, login attempts, TCP errors, and so on. Each connection is * one line of CSV-formatted data set, containing 38 features, like this: * 0,tcp,http,SF,215,45076,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,1,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0,0.00, * 0.00,0.00,0.00,0.00,0.00,0.00,0.00,normal. * * The above line represents a TCP connection to an HTTP service—215 bytes were sent and 45,706 bytes were received. * The user was logged in, and so on. Many features are counts, like num_file_creations in the 17th column * * Many features take on the value 0 or 1, indicating the presence or absence of a behavior, like su_attempted in * the 15th column. They look like the one-hot encoded categorical features, but are not grouped and related in * the same way. * * Each is like a yes/no feature (binary categorical), and is therefore arguably a categorical feature. It is not * always valid to translate categorical features as numbers and treat them as if they had an ordering. However, * in the special case of a binary categorical feature, in most machine learning algorithms, mapping these to a * numeric feature taking on values 0 and 1 will work well. 
* * The rest are ratios like dst_host_srv_rerror_rate in the next-to-last column, and take on values from 0.0 to 1.0, * inclusive. Interestingly, a label is given in the last field. Most connections are labeled normal., but some have * been identified as examples of various types of network attacks. These would be useful in learning to distinguish * a known attack from a normal connection, but the problem here is anomaly detection and finding potentially new * and unknown attacks. This label will be mostly set aside for our purposes.*/ def main(args:Array[String]):Unit={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark=SparkSession.builder().appName("Lesson13_Anomaly_Detection").master("local[2]").getOrCreate() import spark.implicits._ val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark") val path= sparkConfig.getString("sourceDataPath") val filePath=s"${path}/spark_lessons/Lesson13_Anomaly_Detection/kddcup.data" val rawWithoutHeader=spark.read.option("inferSchema","true").option("header","false").csv(filePath) val columnName=Seq("duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins", "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root", "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds", "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate", "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate", "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count", "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate", "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label") val data = rawWithoutHeader.toDF(columnName:_*) // val colNum=data.columns.length // val rowNum=data.count() // println(s"Data has $colNum columns, $rowNum rows") // data.show(1) //data.printSchema() data.cache() /************************************************************************************************************** * ********************************************* 13.6 First take on Clustering ******************************* * ************************************************************************************************************/ FirstTakeOnClustering(data) /************************************************************************************************************ * ************************************************13.7 Choosing K ****************************************** * ************************************************************************************************************/ /* We know we have at least 23 distinct patterns in the data, so the k value could be at least 23, or even more. * We will try many k value to find the best k value. But what is the "best" K value? * * A clustering could be considered good if each data point were near its closest centroid, where “near” is defined * by the Euclidean distance. This is a simple, common way to evaluate the quality of a clustering, by the mean * of these distances over all points, or sometimes, the mean of the distances squared. In fact, KMeansModel offers * a computeCost method that computes the sum of squared distances and can easily be used to compute the mean * squared distance. 
* * Unfortunately, there is no simple Evaluator implementation to compute this measure, not like those available * to compute multiclass classification metrics. It’s simple enough to manually evaluate the clustering cost for * several values of k. Note that the following code could take 10 minutes or more to run. * * */ //Calculate mean distance for each K //(20 to 100 by 20).map(k=> (k,ClusteringScoreForDifferentK(data,k))).foreach(println) /* We have tested for k=20,40,60,80,100. * (20,6.649218115128446E7) * (40,2.5031424366033625E7) * (60,1.027261913057096E7) * (80,1.2514131711109027E7) * (100,7235531.565096531) * * The printed result shows that the score decreases as k increases. Note that scores are shown in scientific * notation; the first value is over 10 pussiance 7, not just a bit over 6. * * Note the output of each execution is different, Because the clustering depends on a randomly chosen initial * set of centroids. * * However, this much is obvious, As more clusters are added, it should be possible to put data points closer to the * nearest centroid. If we go extreme, k value = number of data points, the average distance will be 0. Each data * point has it's own cluster. * * */ /************************************************************************************************************** * ********************************************** 13.8 Choosing the starting point ************************** * *********************************************************************************************************/ /* In the previous output, we could notice the distance for k=80 is higher than for k=60. This shouldn't happen * because higher k always permits at least as good a clustering as a lower k. The problem is that K-means is not * necessarily able to find the optimal clustering for a given k. Its iterative process can converge from a random * starting point to a local mimimum, which may be good but is not optimal. * * This is still true even when more intelligent methods are used to choose initial centroids. K-means++ and K-means|| * are variants of selection algorithms that are more likely to choose diverse, separated centroids and lead more * reliably to a good clustering. Spark MLlib implements K-means||. But all still have an element of randomness * in selection of starting point and can't guarantee an optimal clustering. * * We can improve it by running the iteration longer. The algorithm has a threshold via setTol() that controls the * minimum amount of cluster centroid movement considered significant. Lower values means algorithm will let the * centroids continue to move longer. 
Increasing the maximum number of iterations with setMaxIter() also prevents * it from potentially stopping too early at the cost of possibly more computation*/ // calculate mean distance for each k with better choosen starting point // (20 to 100 by 20).map(k=> (k,ImproveStartingPointClusteringScore(data,k))).foreach(println) /* (20,5.8227534372047536E7) * (40,3.795519679283671E7) * (60,2.0655360813366067E7) * (80,1.1507239713147238E7) * (100,9888290.180070076) * * The output score is much better, as k value increase, mean distance decrease */ /************************************************************************************************************** * ********************************************** 13.9 Feature Normalization ******************************** * *********************************************************************************************************/ /* As features value range can be small(0,..,100) and big (0,..,1000000000000000), this makes the data viz difficult * on a 3D projection. * * We can normalize each feature by converting it to a standard score. This means subtracting the mean of the * feature's values from each value, and dividing by the standard deviation, as shown in the standard score * equation: X_t = (X - X.mean)/X.std. For more info about "unit standard deviation", check my wiki page * pengfei.liu:data_science:statistic:glossary#unit_standard_deviation * * In fact, subtracting means has no effect on the clustering because the subtraction effectively shifts all the * data points by the same amount in the same directions. This does not affect inter-point Euclidean distances. * * ML and MLlib both provide StandardScaler, a component that can perform this kind of standardization and be easily * added to the pipeline.*/ /* * withStd: True by default. Scales the data to unit standard deviation. * withMean: False by default. Centers the data with mean before scaling. It will build a dense output, so take * care when applying to sparse input. * */ // calculate mean distance for each k with better choose starting point and feature normalization // (20 to 100 by 20).map(k=> (k,ClusteringScoreWithFeatureNormalization(data,k))).foreach(println) /* * (20,7.412644144009894) * (40,2.5915895471974357) * (60,1.153548902077912) * (80,0.818219598187268) * (100,0.6208390857775707) * The feature Normalization has helped put dimensions on more equal footing, and the absolute distances between * points (and thus the cost) is much smaller in abolute terms. However, there isn't yet an obvious value of k * beyond which increasing it doess little to improve the cost.*/ /************************************************************************************************************** * ********************************************** 13.10 Categorical Variables ******************************** * *********************************************************************************************************/ /* Normalization was a valuable step forward, but more can be done to improve the clustering. In particular, * several features have been left out entirely because they aren't numeric. * * To use the categorical features, we could use the one hot encoding to transform them into several binary numeric * features. For example, the second column contains the protocol type: tcp, udp, icmp. We could use three binary * features "is_TCP", "is_UDP", "is_ICMP" to represent this column. 
* * */ // MyClusteringScoreWithCategoricalFeature(data,2) // (160 to 270 by 30).map(k=> (k,ClusteringScoreWithCategoricalFeature(data,k))).foreach(println) /* The following are the results of different k with one hot encoding of three Categorical feature * (160,1.982904044799585) (190,1.626247178667135) (220,1.1937676521871465) (250,0.9986357098095597) * * */ /************************************************************************************************************** * ********************************************** 13.11 Using labels with Entropy *************************** * *********************************************************************************************************/ /* In previous examples, we use existing labels to check the quality of our clustering and choosing k. A good * clustering, it seems, should agree with these human-applied labels. It should put together points that share a * label frequently and not lump together points of many different labels. It should produce clusters with relatively * homogeneous labels. * * In lesson 12, we have metrics for homogeneity: Gini impurity and entropy. These are functions of the proportions * of labels in each cluster, and produce a number that is low when the proportions are skewed toward few, or one, * label. The function entropy will be used here for illustration. */ /* A good clustering would have clusters whose collections of labels are homogeneous and so have low entropy. A * weighted average of entropy can therefore be used as a cluster score. Generally, entropy refers to disorder * or uncertainty. For more information about Entropy, see https://en.wikipedia.org/wiki/Entropy_(information_theory) * */ // (60 to 270 by 30).map(k => (k, ClusteringScoreWithLabelEntropy(data, k))).foreach(println) /* * (60,0.03475331900669869) * (90,0.051512668026335535) * (120,0.02020028911919293) * (150,0.019962563512905682) * (180,0.01110240886325257) * (210,0.01259738444250231) * * Here again, results suggest k=180 is a reasonable choice because its score is actually lower than 150 and 210 * */ /* Now with k=180, we can print the labels for each cluster to get some sense of the resulting clustering. * Clusters do seem to be dominated by one type of attack each, and contain only a few types. */ /* val pipelineModel=FullKMeansPipeLine(data,180) val countByClusterLabel=pipelineModel.transform(data) .select("cluster","label") .groupBy("cluster","label").count() .orderBy("cluster","label") countByClusterLabel.show() */ /************************************************************************************************************** * ********************************************** 13.12 Anomaly Detection *************************** * *********************************************************************************************************/ /* Now we can make an actual anomaly detector. Anomaly detection amounts to measuring a new data point's distance * to its nearest centroid. If this distance exceeds some threshold, it it anomalous. This threshold might be * chosen to be the distance of, say, the 100th-farthest data point from among know data * */ // AnomalyDetection(data) /* The output is [9,tcp,telnet,SF,307,2374,0,0,1,0,0,1,0,1,0,1,3,1,0,0,0,0,1,1,0.0,0.0,0.0,0.0,1.0,0.0,0.0,69, * 4,0.03,0.04,0.01,0.75,0.0,0.0,0.0,0.0,normal.] * * Althought, the label says normal, but we could notice that this connections connect to 69 different hosts. 
* * **/ /************************************************************************************************************** * ********************************************** 13.13 Future works ***************************************** * *********************************************************************************************************/ /* 1. The kMean model which we use is only a simplistic one. For example, Euclidean distance is used in this * example. Because it is the only distance function supported by ML lib at this time. In the future, we may use * distance functions that can better account for the distributions of and correlations between features, such as * Mahalanobis distance (https://en.wikipedia.org/wiki/Mahalanobis_distance) * * 2. There are also sophisticated cluster-quality evaluation metrics that could be applied (even without labels) * to pick k, such as the Silhouette coefficient (https://en.wikipedia.org/wiki/Silhouette_(clustering)). These * tend to evaluate not just closeness of points within one cluster, but closeness of points to other clusters. * * Finally, different models could be applied instead of simple K-means clustering; for example, * - Gaussian mixture model (https://en.wikipedia.org/wiki/Mixture_model#Gaussian_mixture_model) * - DBSCAN https://en.wikipedia.org/wiki/DBSCAN * These two could capture more subtle relationships between data points and the cluster centers. Spark Ml lib * aleady implements Gaussian mixture models. * * 3. The code in this Lesson cloud be used within Spark Streaming to score*/ } /*************************************************13.6 First model **************************************/ def FirstTakeOnClustering(data:DataFrame):Unit={ /* As we noticed before, we have a label column. Let's see how many different labels we have in this data set*/ // data.select("label").groupBy("label").count().orderBy(desc("count")).show(25) /* You will find the following output, There are 23 distinct labels, the most frequent are smurf. and neptune. *attacks | label| count| +----------------+-------+ | smurf.|2807886| | neptune.|1072017| | normal.| 972781| | satan.| 15892| */ /* We have noticed we have non-numeric columns in our dataset, for example second column is a categorical feature * which could have values such as tcp, udp, icmp. * * But K-mean clustering model only accept numeric features. For our first model, we will just ignore all non numeric * features*/ val nonNumericFeature=Array("protocol_type","service","flag") val DataWithOnlyNumericFeature=data.drop(nonNumericFeature:_*) val numericFeatureCol=DataWithOnlyNumericFeature.columns.filter(_ !="label") val assembler = new VectorAssembler().setInputCols(numericFeatureCol).setOutputCol("featureVector") val kmeans = new KMeans().setPredictionCol("cluster").setFeaturesCol("featureVector") val pipeline = new Pipeline().setStages(Array(assembler,kmeans)) val pipelineModel = pipeline.fit(DataWithOnlyNumericFeature) val kmeansModel=pipelineModel.stages.last.asInstanceOf[KMeansModel] //kmeansModel.clusterCenters.foreach(println) /* The output of kmeansModel clusterCenters is two vectors of coordinates which describe the center of two cluster * (aka. centroid). As there are only two vectors, we cloud say K-means was fitting k=2 clusters to the data. We * know that we have at least 25 different groups, so k=2 will never give us a accurate model which can do the * clustering. 
* * The following code will tell us how the output clusters matched the labels which we know */ val DataWith2Cluster=pipelineModel.transform(DataWithOnlyNumericFeature) DataWith2Cluster.select("cluster","label").groupBy("cluster","label").count().orderBy(col("cluster"),desc("count")).show(25) /* We could notice only label portsweep is in cluster 1 and all other labels are in cluster 0. */ } /******************************************13.7 Choosing K ********************************************/ def ClusteringScoreForDifferentK(data:DataFrame,k:Int):Double={ val nonNumericFeature=Array("protocol_type","service","flag") val DataWithOnlyNumericFeature=data.drop(nonNumericFeature:_*) val numericFeatureCol=DataWithOnlyNumericFeature.columns.filter(_ !="label") val assembler = new VectorAssembler().setInputCols(numericFeatureCol).setOutputCol("featureVector") val kmeans=new KMeans().setSeed(Random.nextLong()) .setK(k) .setPredictionCol("cluster") .setFeaturesCol("featureVector") val pipeline=new Pipeline().setStages(Array(assembler,kmeans)) val kmeansModel=pipeline.fit(data).stages.last.asInstanceOf[KMeansModel] //compute mean from total squared distance ("cost") kmeansModel.computeCost(assembler.transform(data))/data.count() } /**********************************13.8 Choosing the starting point ************************************/ def ImproveStartingPointClusteringScore(data:DataFrame,k:Int):Double={ val nonNumericFeature=Array("protocol_type","service","flag") val DataWithOnlyNumericFeature=data.drop(nonNumericFeature:_*) val numericFeatureCol=DataWithOnlyNumericFeature.columns.filter(_ !="label") val assembler = new VectorAssembler().setInputCols(numericFeatureCol).setOutputCol("featureVector") val kmeans=new KMeans().setSeed(Random.nextLong()) .setK(k) .setPredictionCol("cluster") .setFeaturesCol("featureVector") //increase from the default value is 20 .setMaxIter(40) //decrease from the default value 1.0e-4 .setTol(1.0e-5) val pipeline=new Pipeline().setStages(Array(assembler,kmeans)) val kmeansModel=pipeline.fit(data).stages.last.asInstanceOf[KMeansModel] //compute mean from total squared distance ("cost") //computeCost method evaluate clustering by computing Within Set(cluster) Sum of Squared Errors /* */ kmeansModel.computeCost(assembler.transform(data))/data.count() } /*************************************13.9 Feature Normalization ******************************************/ def ClusteringScoreWithFeatureNormalization(data:DataFrame,k:Int):Double={ val nonNumericFeature=Array("protocol_type","service","flag") val DataWithOnlyNumericFeature=data.drop(nonNumericFeature:_*) val numericFeatureCol=DataWithOnlyNumericFeature.columns.filter(_ !="label") val assembler = new VectorAssembler().setInputCols(numericFeatureCol).setOutputCol("featureVector") //the scaler normalize each feature to have unit standard deviation. 
val scaler = new StandardScaler() .setInputCol("featureVector") .setOutputCol("scaledFeatureVector") .setWithStd(true) .setWithMean(false) val kmeans=new KMeans() .setSeed(Random.nextLong()) .setK(k) .setPredictionCol("cluster") .setFeaturesCol("scaledFeatureVector") .setMaxIter(40) .setTol(1.0e-5) val pipeline=new Pipeline().setStages(Array(assembler,scaler,kmeans)) val pipelineModel=pipeline.fit(data) val kMeansModel=pipelineModel.stages.last.asInstanceOf[KMeansModel] kMeansModel.computeCost(pipelineModel.transform(data))/data.count() } /****************************** 13.10 One hot code Categorical Feature *****************************/ /** Creates a pipeline which transform string categorical column into one hot encoded vector column * * @param inputCol input column name * @return return the generated pipeline and name of the output column */ def OneHotPipeLine(inputCol:String):(Pipeline,String)={ /* * The StringIndexer will read all possible value of a categorical value in a dataset, and encoded with numeric * values, in our example, we have the following result for protocol_type * * +-------------+---------------------+ * |protocol_type|protocol_type_indexed| * +-------------+---------------------+ * | udp| 2.0| * | tcp| 1.0| * | icmp| 0.0| * +-------------+---------------------+ * */ val indexer = new StringIndexer() .setInputCol(inputCol) .setOutputCol(inputCol+"_indexed") val encoder = new OneHotEncoderEstimator() .setInputCols(Array(inputCol+"_indexed")) .setOutputCols(Array(inputCol+"_vec")) val pipeline = new Pipeline().setStages(Array(indexer,encoder)) // return the pipeline and name of the output column (pipeline,inputCol+"_vec") } /* Break down the one hot encoder pipeline into steps with out put*/ def MyClusteringScoreWithCategoricalFeature(data:DataFrame,k:Int):Double={ val categoricalFeatures=Array("protocol_type","service","flag") val feature="protocol_type" val indexer=new StringIndexer().setInputCol(feature).setOutputCol(feature+"_indexed") val indexModel=indexer.fit(data) val indexData=indexModel.transform(data) // indexData.select(feature,feature+"_indexed").distinct().show(5) // OneHotEncoder is deprecated since spark 2.3, We can use OneHotEncoderEstimator to replace it. /*val encoder = new OneHotEncoder() .setInputCol(feature+"_indexed") .setOutputCol(feature+"_vec") val encodedData=encoder.transform(indexData)*/ val encoder = new OneHotEncoderEstimator() .setInputCols(Array(feature+"_indexed")) .setOutputCols(Array(feature+"_vec")) val encoderModel = encoder.fit(indexData) val encodedData = encoderModel.transform(indexData) encodedData.select(feature,feature+"_indexed",feature+"_vec").distinct().show(5) /* The output of one hot encoder in spark is not binary numeric columns. The output is a description of a vector * The description is an Array of three elements, first element is the length of the vector, second element is the * position of value, the third element is the value * * * +-------------+---------------------+-----------------+ * |protocol_type|protocol_type_indexed|protocol_type_vec| * +-------------+---------------------+-----------------+ * | tcp| 1.0| (2,[1],[1.0])| * | udp| 2.0| (2,[],[])| * | icmp| 0.0| (2,[0],[1.0])| * +-------------+---------------------+-----------------+ * * The above output is the onehot encoding of feature protocol_type, * icmp -> (stringIndexer) 0 -> 10 (vec encoding) -> (2,[0],[1.0]). The vector has length 2, and position 0 * has value 1.0. Note that the position of 10 starts from left to right. 
So value 1 position is 0 not 1. * */ /*for(features <- categoricalFeatures){}*/ return 0.0 } def ClusteringScoreWithCategoricalFeature(data:DataFrame,k:Int):Double={ val (protoTypeEncoder, protoTypeVecCol) = OneHotPipeLine("protocol_type") val (serviceEncoder, serviceVecCol) = OneHotPipeLine("service") val (flagEncoder, flagVecCol) = OneHotPipeLine("flag") // Original columns, without label / string columns, but with new vector encoded cols // We can use -- and ++ to add or remove element from a Set in scala. val assembleCols = Set(data.columns: _*)-- Seq("label", "protocol_type", "service", "flag")++ Seq(protoTypeVecCol, serviceVecCol, flagVecCol) val assembler = new VectorAssembler(). setInputCols(assembleCols.toArray). setOutputCol("featureVector") val scaler = new StandardScaler() .setInputCol("featureVector") .setOutputCol("scaledFeatureVector") .setWithStd(true) .setWithMean(false) val kmeans = new KMeans(). setSeed(Random.nextLong()). setK(k). setPredictionCol("cluster"). setFeaturesCol("scaledFeatureVector"). setMaxIter(40). setTol(1.0e-5) val pipeline = new Pipeline().setStages( Array(protoTypeEncoder, serviceEncoder, flagEncoder, assembler, scaler, kmeans)) val pipelineModel = pipeline.fit(data) val kmeansModel = pipelineModel.stages.last.asInstanceOf[KMeansModel] kmeansModel.computeCost(pipelineModel.transform(data)) / data.count() } /************************************** 13.11 Using labels with entropy ****************************/ def entropy(counts: Iterable[Int]):Double={ // get all positive values in the counts collection val values=counts.filter(_ >0) // cast all values to double and do sum val n = values.map(_.toDouble).sum val entropy=values.map{v=> //calculate p first val p=v / n // -p * math.log(p) }.sum /* We can use the following code, if you don't want to define a local variable values.map { v => -(v / n) * math.log(v / n) }.sum */ return entropy } def FullKMeansPipeLine(data:DataFrame,k:Int):PipelineModel={ // transform the string categorical feature column into one hot encoding column val (protoTypeEncoder, protoTypeVecCol) = OneHotPipeLine("protocol_type") val (serviceEncoder, serviceVecCol) = OneHotPipeLine("service") val (flagEncoder, flagVecCol) = OneHotPipeLine("flag") // Start with the original columns, remove label and string columns, and adding new vector one hot encoded cols val assembleCols = Set(data.columns: _*) -- Seq("label", "protocol_type", "service", "flag") ++ Seq(protoTypeVecCol, serviceVecCol, flagVecCol) // Take all feature column and transform them into a Vector val assembler = new VectorAssembler(). setInputCols(assembleCols.toArray). setOutputCol("featureVector") // normalize the feature val scaler = new StandardScaler() .setInputCol("featureVector") .setOutputCol("scaledFeatureVector") .setWithStd(true) .setWithMean(false) // build the kmeans model val kmeans = new KMeans(). setSeed(Random.nextLong()). setK(k). setPredictionCol("cluster"). setFeaturesCol("scaledFeatureVector"). setMaxIter(40). 
setTol(1.0e-5) // build the pipeline with above steps val pipeline = new Pipeline().setStages( Array(protoTypeEncoder, serviceEncoder, flagEncoder, assembler, scaler, kmeans)) // train the model pipeline.fit(data) } def ClusteringScoreWithLabelEntropy(data:DataFrame,k:Int):Double={ val spark=data.sparkSession import spark.implicits._ // get the trained kmean model val pipelineModel=FullKMeansPipeLine(data,k) //predict cluster for each label with the trained model val clusterLabel=pipelineModel.transform(data).select("cluster","label").as[(Int,String)] //calculate the cluster entropy val weightedClusterEntropy = clusterLabel // Extract collections of labels, per cluster .groupByKey{case(cluster,_)=>cluster} .mapGroups{case(_,clusterLabels)=> val labels = clusterLabels.map { case (_, label) => label }.toSeq // Count labels in collections val labelCounts = labels.groupBy(identity).values.map(_.size) labels.size * entropy(labelCounts) }.collect() // Average entropy weighted by cluster size weightedClusterEntropy.sum / data.count() } def AnomalyDetection(data:DataFrame):Unit={ val spark=data.sparkSession import spark.implicits._ val pipelineModel=FullKMeansPipeLine(data,180) // get the trained model val kMeansModel=pipelineModel.stages.last.asInstanceOf[KMeansModel] // get the centroids of all clusters val centroids=kMeansModel.clusterCenters // predict clusters of the data set val clusteredData=pipelineModel.transform(data) val threshold = clusteredData.select("cluster","scaledFeatureVector").as[(Int,Vector)] .map{case(cluster,vec)=>Vectors.sqdist(centroids(cluster),vec)} .orderBy($"value".desc) //single output implicitly named "value" .take(100).last val originalCols = data.columns // filter all rows which sqaure distance is greater than the threshold val anomalies = clusteredData.filter { row => val cluster = row.getAs[Int]("cluster") val vec = row.getAs[Vector]("scaledFeatureVector") Vectors.sqdist(centroids(cluster), vec) >= threshold }.select(originalCols.head, originalCols.tail:_*) // we use first to make the read easy, show works too println(anomalies.first()) } }
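A quick standalone check of the entropy helper used for the label-based cluster score in section 13.11 (hypothetical object name, no Spark needed): a cluster whose labels are all identical scores 0, while an even two-label mix scores ln(2), about 0.693.

object EntropySketch {
  // Shannon entropy (natural log) of a collection of label counts, as in the lesson's entropy().
  def entropy(counts: Iterable[Int]): Double = {
    val values = counts.filter(_ > 0)
    val n = values.map(_.toDouble).sum
    values.map { v =>
      val p = v / n
      -p * math.log(p)
    }.sum
  }

  def main(args: Array[String]): Unit = {
    println(entropy(Seq(100, 0))) // 0.0   : perfectly homogeneous cluster
    println(entropy(Seq(50, 50))) // ~0.693: evenly mixed two-label cluster
    println(entropy(Seq(90, 10))) // ~0.325: mostly one label
  }
}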
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/ml/classification/SupportVectorMachineClassification.scala
<gh_stars>0 package org.pengfei.spark.ml.classification import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.SparkSession import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType} import org.apache.spark.sql.functions.udf import org.apache.spark.mllib.classification.SVMWithSGD import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics import org.apache.spark.mllib.linalg.Vectors import org.apache.spark.mllib.optimization.L1Updater import org.apache.spark.mllib.regression.LabeledPoint /*In this tutorial, we use the iris dataset*/ object SupportVectorMachineClassification { def main(args:Array[String]): Unit ={ /* * Init the spark session * */ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder(). master("local"). appName("SVMClassification"). getOrCreate() /* * read data from csv file * */ val inputFile="file:///home/pliu/Documents/spark_input/iris.txt" val schema = StructType(Array( StructField("sepal_length",DoubleType,false), StructField("sepal_width",DoubleType,false), StructField("petal_length",DoubleType,false), StructField("petal_width",DoubleType,false), StructField("Label",StringType,false) )) //Read csv file val df = spark.read.format("com.databricks.spark.csv").option("header", "false").schema(schema).load(inputFile) //println("full records number: "+ df.count) //df.show(5) /* * As svm classifier can only treat two label classification problems * we need to remove one type of iris flower, In this example, I removed * versicolor * * To make it easier for classifer we conver the label from string to int * ris-setosa to 1 and Iris-virginica to 0 * */ //df.select(df("label")).distinct.show() val readyDf=df.filter(df("label")=!="Iris-versicolor") //println("processed records number: "+ readyDf.count) //readyDf.select(df("label")).distinct.show() val convertUDF=udf(convertLabelToNum) val cleanDF=readyDf.withColumn("NumLabel",convertUDF(readyDf("label"))).drop("label").withColumnRenamed("NumLabel","label") //cleanDF.show(5) /* * transforme the feature columns into a vector * */ // vecDf.show(5) /* * transform dataframe to labeledPoint RDD * Before spark 2.0 * the labeledPoint is in package org.apache.spark.mllib.regression.LabeledPoint * The vectors is in org.apache.spark.mllib.linalg.Vectors * * After spark 2.0 * the labeledPoint is in package org.apache.spark.ml.feature.LabeledPoint * the vectors is in org.apache.spark.ml.linalg.Vectors * * "sepal_width","petal_length","petal_width" * */ val labeled = cleanDF.rdd.map(row=>LabeledPoint(row.getAs[Double]("label"),Vectors.dense(row.getAs[Double]("sepal_length"), row.getAs[Double]("sepal_width"),row.getAs[Double]("petal_length"),row.getAs[Double]("petal_width")))) // labeled.foreach(println) /* * Split the data into training and testing dataset with 70 and 30 ratio * * */ val splits=labeled.randomSplit(Array(0.6,0.4),seed=11L) val training = splits(0).cache() val test=splits(1) /* * build svmwithsgd model, and it's still in the mllib package, * and it's not compatible with spark 2.0 ml package, * */ val numIterations = 1000 val model = SVMWithSGD.train(training,numIterations) // clear the default threshold(门坎), the classifier will show // the real classification score model.clearThreshold() val scoreAndLabels = test.map { point => val score = model.predict(point.features) (score, point.label) } // scoreAndLabels.foreach(println) // set the threshold to 0, all the score <0 will be negative predication // all the score > 
0 will be positive prediction
    model.setThreshold(0.0)

    // we build a binary classification metrics object to measure the accuracy of the classifier
    val metrics = new BinaryClassificationMetrics(scoreAndLabels)
    val auROC = metrics.areaUnderROC()
    // print the accuracy
    println("Area under ROC = " + auROC)

    // build a new classifier with an optimised training configuration
    val svmAlg = new SVMWithSGD()
    svmAlg.optimizer.
      setNumIterations(2000).
      setRegParam(0.1).
      setUpdater(new L1Updater)
    val modelL1 = svmAlg.run(training)

    // Test the newly trained classifier with a test sample
    // 5.1,3.5,1.4,0.2,Iris-setosa
    val result=modelL1.predict(Vectors.dense(5.1,3.5,1.4,0.2))
    println("prediction result"+result.toString)
  }

  // We define a lambda expression function to convert Iris-setosa to 1.0
  // and Iris-virginica to 0.0
  def convertLabelToNum:(String=>Double)= {
    label =>
      if(label.equalsIgnoreCase("Iris-virginica")) 0.0
      else if (label.equalsIgnoreCase("Iris-setosa")) 1.0
      else 2.0
  }
}

/*
 * Sample (score, label) output before setting the threshold:
 * (-2.627551665051128,0.0)
 * (-2.145161194882099,0.0)
 * (-2.3068829871403618,0.0)
 * (-3.0554378212130096,0.0)
 */
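// A minimal standalone sketch showing how BinaryClassificationMetrics turns raw (score, label)
// pairs into an AUROC value, independently of the SVM trained above. The object name and the
// hand-made scores below are assumptions used only for illustration: well-separated scores give
// an area under ROC of 1.0, while overlapping scores pull it down towards 0.5.
object BinaryMetricsDemo {
  import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
  import org.apache.spark.sql.SparkSession

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("BinaryMetricsDemo").getOrCreate()
    val sc = spark.sparkContext

    // (raw classifier score, true label): positives score high, negatives score low
    val separated = sc.parallelize(Seq((2.1, 1.0), (1.3, 1.0), (-0.8, 0.0), (-1.7, 0.0)))
    println("AUROC, well separated scores: " + new BinaryClassificationMetrics(separated).areaUnderROC())

    // one positive now scores below a negative, so the ranking is no longer perfect
    val overlapping = sc.parallelize(Seq((2.1, 1.0), (-1.0, 1.0), (-0.8, 0.0), (-1.7, 0.0)))
    println("AUROC, overlapping scores:    " + new BinaryClassificationMetrics(overlapping).areaUnderROC())

    spark.stop()
  }
}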
pengfei99/Spark
common_utils/scala/data_transformation.scala
<filename>common_utils/scala/data_transformation.scala<gh_stars>0 /** * This method transform multi rows of an object into columns, after the transformation, for each object we only have * one row in the data frame. To make the transformation, this method cast all column to type string, as we don't do * arthimetic operations here. So it won't be a problem. You can change the type back to Int, or double after the * transformation. * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param rawDf The source data frame in which the transformation will take place. * @param targetIdColumnName The column in the data frame which contains the name of the filed. Each row will become * the name of the newly created column name. * @param targetValueColumnName The column in the data frame which contains the value of the filed. Each row will * become a new row in the corresponding filed column * @return a data frame which contains the result of the transformation * * */ def RowToColumn(rawDf: DataFrame, objectIdColumnName:String,targetIdColumnName: String,targetValueColumnName:String): DataFrame = { val spark = rawDf.sparkSession import spark.implicits._ /* Step0. Eliminate all null rows, it may create a null dataframe (dataframe without rows), we can't build columns * with no rows, so we need to fill null with a null value which will not cause null pointer exception. * As a result, we cast all columns to string type and fill the null cell with pre-defined nullValue(String)*/ val df = rawDf.select(rawDf.columns.map(c => col(c).cast(StringType)) : _*).na.fill(nullValue) /* Step1. Get all possible filedIDs, which will be become the column name of each filed */ val filedIDs = df.select(targetIdColumnName).distinct().orderBy(df(targetIdColumnName).asc) filedIDs.show(10,false) // convert the column in the data frame which contains the filed Ids to an Array of the filed Ids. val filedIDsArray: Array[String] = filedIDs.collect().map(_.get(0).toString) /* Step2. Build the (filedId,filedValue) <key,value> map for each row. */ /* We have two solutions to do this. * Solution 1 : build a user define function which build a map * Solution 2 : Spark provide map function which can build a map based on two columns * Here we choose Solution 2 , spark native function is always better than udf.*/ // Solution 1: If we don't fill null value before, here we need to use Option type to avoid null pointer /*def buildFiledMap(filedName:String,filedValue:String):Map[String,Option[String]]={ if(filedValue.isEmpty) Map(filedName->None) else Map(filedName->Option(filedValue)) } spark.udf.register("buildFiledMap",(arg1:String,arg2:String)=>buildFiledMap(arg1,arg2)) val filedIdValueMap=df.withColumn("filed_map",expr(s"buildFiledMap(${targetIdColumnName},${targetValueColumnName})"))*/ /* def buildFiledMap(filedName:String,filedValue:String):Map[String,String]={ if(filedValue.isEmpty) Map(filedName->"null") else Map(filedName->filedValue) } spark.udf.register("buildFiledMap",(arg1:String,arg2:String)=>buildFiledMap(arg1,arg2)) val filedIdValueMap=df.withColumn("filed_map",expr(s"buildFiledMap(${targetIdColumnName},${targetValueColumnName})")) */ /* Solution 2 : The spark native map function * The map function by default does not deal with null value, so if we have null value in the two columns you will * have x->, or ->y, when you have functions to call these null values, you will have null pointer exception. 
* The solution is to fill the null value with a string "null", **/ val filedIdValueMap = df.withColumn("filed_map", map(df(targetIdColumnName), df(targetValueColumnName))) filedIdValueMap.show(5,false) /* Step3. Group the (filedId,filedValue) map for each distinct subject which may have multiple rows. Each row has * a map. After group, we concatenate all maps of a subject into one single map. Here, we used collect_list, there is * another similar function collect_set, which list returns an ordered sequence of elements, set returns an unordered * distinct list of elements, we know that, we will not have duplicate filedId for one subject. so we don't need to use * set, we prefer to use list.*/ val groupedFiledIdValueMap = filedIdValueMap.groupBy(objectIdColumnName) .agg(collect_list("filed_map")) // return a list of map .as[(String, Seq[Map[String, String]])] // <-- leave Rows for typed pairs .map { case (id, list) => (id, list.reduce(_ ++ _)) } // <-- concatenate all maps to a single map // the reduce(_ ++ _) translates to reduce((a,b)=>a++b) where a, b are lists, ++ is a method in list interface // which concatenates list b to a. .toDF(objectIdColumnName, "filed_map") groupedFiledIdValueMap.show(10, false) /* Step 4. Create column for each fieldId based on the complete fieldId list, with the getFiledValue function, * */ val bFiledIDsArray: Broadcast[Array[String]] = spark.sparkContext.broadcast(filedIDsArray) def getFiledValue(filedId: String, filedMap: Map[String, String]): String = { //you can replace the empty (null) value as you want, here I tried empty string "", "null" and "." if(filedMap.isEmpty||filedId.isEmpty){nullValue} else { filedMap.getOrElse(filedId, nullValue) } } //spark.udf.register("getFiledValue", (arg1: String, arg2: Map[String, String]) => getFiledValue(arg1, arg2)) spark.udf.register("getFiledValue", getFiledValue(_:String, _: Map[String, String])) var tmpDf = groupedFiledIdValueMap (0 until bFiledIDsArray.value.length).map { i => val filedId: String = bFiledIDsArray.value(i) tmpDf = tmpDf.withColumn("current_id", lit(filedId)) .withColumn(filedId, expr("getFiledValue(current_id,filed_map)")) .drop("current_id") // The solution which takes a variable and a column does not work, because, the udf only allows column type as argument // //tmpDf=tmpDf.withColumn(filedId,getFiledValue(filedId,filed_map))) } val result=tmpDf.drop("filed_map") result.show(5,false) result } /** * This function takes a data frame,and a Map[oldColName,newColName], it will replace the old column name by the * new column name and returns the data frame with new names. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @param nameMap A Map of [oldColName,newColName] * @return DataFrame * */ def ChangeColName(df:DataFrame,nameMap:Map[String,String]):DataFrame={ val oldNames=nameMap.keySet.toArray var result=df for(oldName<-oldNames){ result=result.withColumnRenamed(oldName,nameMap.getOrElse(oldName,"No_keys")) } return result } /** * This function takes a data frame, a list of column names, a old value, and a new value, it will replace the old * value by the new value in all given columns of the data frame. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param rawDf The source data frame. 
* @param colNames A list of column names * @param specValue A string value which needs to be replaced * @param newValue A string value which will repalce the old value * @return DataFrame * */ def replaceSpecValue(rawDf:DataFrame,colNames:Array[String],specValue:String,newValue:String):DataFrame={ /*Step 0 : cast all column to string*/ val spark=rawDf.sparkSession import spark.implicits._ val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*) /*Step 1 : transform spec value to null*/ var result=df for(colName<-colNames){ val newColName=colName+"_tmp" result=result.withColumn(newColName, when(result(colName) === specValue, newValue).otherwise(result(colName))) //create a tmp col with digitnull .drop(colName) //drop the old column .withColumnRenamed(newColName,colName) // rename the tmp to colName } result } /** * This function merges values of two column, if one is null, return other, if two values are not null, check if they * are equal, otherwise raise exception, two column cant be merged. * @author <NAME> * @version 1.0 * @since 2019-02-13 * @param col1Value first column value to be merged * @param col2Value second column value to be merged * @return String * */ def mergeValue(col1Value:String,col2Value:String):String={ if (col1Value.equals("null")) {return col2Value} else if(col2Value.equals("null") || col1Value.equals(col2Value)) {return col1Value} else {return "error"} } // define a spark udf for mergeValue funciton val mergeValueUDF = udf(mergeValue(_:String, _: String)) /** * This function takes a dataframe and a list of sofa v2 column names. Based on the v2 column names, it can build the * corresponding v1 column names, then it calls the udf mergeValue to merge the v1 and v2 column. In the end it * removes the v2 day01 and v1 column, and add the merged column. * * @author <NAME> * @version 1.0 * @since 2019-02-27 * @param df the source data frame * @param colNameList the sofa v2 column names list * @return DataFrame * */ def mergeSofaColumns(df:DataFrame,colNameList:Array[String]):DataFrame={ var result=df for(colName<-colNameList){ // We exclude CBD_SOFA_NA, because it does not exist in V1, so no need to do the merge if (!colName.equals("CBD_SOFA_NA")){ /* CBD_Cardio_SOFA, generates CBD_Cardio_SOFA_D01 and CBD_Cardio_SOFA_Theoretical_D1 */ val col1Name=s"${colName}_Theoretical_D1" val col2Name=s"${colName}_D01" result=result.withColumn(s"merged_${col2Name}", mergeValueUDF(col(col1Name),col(col2Name))) //check the merge result result.select(s"merged_${col2Name}",col1Name,col2Name).show(10,false) //clean the result, drop v1 and V2 day01 columns, and rename merged_column to V2 day01 result=result.drop(col1Name) .drop(col2Name) .withColumnRenamed(s"merged_${col2Name}",col2Name) } } result } /** * This function takes a data frame, a column name and an array of specific value. It will remove all rows if the * given column contains the specific value in the Array. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @param colName target column name * @param specValues an Array of specific values * @return DataFrame * */ def removeRowsWithSpecValues(df:DataFrame,colName:String,specValues:Array[String]):DataFrame={ var result=df for(specValue<-specValues){ result=result.filter(!(result(colName)===specValue)) } result } /** * This function takes a data frame and returns a map of (colNum->colName), the elements of the return map are * sorted by the column number with asc order. 
* * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @return a Map[Int, String] * */ def getColumnNumNameMapping(df:DataFrame):scala.collection.immutable.ListMap[Int,String]={ val columns=df.columns var i=1 var colNameNumMap=Map[Int,String]() for(col<-columns){ colNameNumMap+=(i->col) i=i+1 } /* To understand the following function, it's better to break it into two parts * 1. val x:Seq[(Int,String)] = colNameNumMap.toSeq.sortWith(_._1 < _._1) * * 2. ListMap(x:_*) * * The sortWith function returns a sequence of tuples, it takes a boolean expression, in * our example, _._1 means the first element of a tuple. We can also replace the sortWith * function with sortBy(_._1), which means sort the sequence by using the first element of the * tuples, from low to high. It also returns a sequence of tuples. * * The _* is used to convert the data so it will be passed as multiple parameters. In our example, * x has a Sequence type, but x:_* has tuple type * */ ListMap(colNameNumMap.toSeq.sortWith(_._1 < _._1):_*) }
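// A small usage sketch for the RowToColumn helper above. The object name, column names and values
// below are illustrative assumptions; the commented block shows the one-row-per-object shape the
// helper is expected to produce, and the pivot call shows Spark's built-in alternative (not the
// author's method) for the same row-to-column transformation when the distinct filed ids are few.
object RowToColumnDemo {
  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.functions.first

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("RowToColumnDemo").getOrCreate()
    import spark.implicits._

    val raw = Seq(
      ("1004", "Cardio_SOFA_D01", "4"),
      ("1004", "Cardio_SOFA_D02", "1"),
      ("1007", "Cardio_SOFA_D01", "2")
    ).toDF("Patient", "filedId", "filedValue")

    // Expected shape of RowToColumn(raw, "Patient", "filedId", "filedValue"):
    // +-------+---------------+---------------+
    // |Patient|Cardio_SOFA_D01|Cardio_SOFA_D02|
    // +-------+---------------+---------------+
    // |1004   |4              |1              |
    // |1007   |2              |null           |  <- missing filed ids get the pre-defined nullValue
    // +-------+---------------+---------------+

    // Built-in alternative: groupBy + pivot + first gives one column per distinct filedId
    raw.groupBy("Patient").pivot("filedId").agg(first("filedValue")).show(false)

    spark.stop()
  }
}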
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson20_GPFS_Stats/Lesson20_GPFS_Stats.scala
<reponame>pengfei99/Spark<filename>LearningSpark/src/main/java/org/pengfei/Lesson20_GPFS_Stats/Lesson20_GPFS_Stats.scala<gh_stars>0 package org.pengfei.Lesson20_GPFS_Stats import java.sql.Timestamp import java.time._ import com.typesafe.config.ConfigFactory import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.expressions.UserDefinedFunction import org.apache.spark.sql.functions._ import org.apache.spark.sql.types._ import org.apache.spark.sql.{DataFrame, SparkSession} object Lesson20_GPFS_Stats { def main(args:Array[String]):Unit={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark=SparkSession.builder().master("local[2]").appName("Lesson20_GPFS_Stats").getOrCreate() spark.udf.register("getDate", (arg: Long)=>getDate(arg)) spark.udf.register("getFileName", (arg:String)=>getFileName(arg)) spark.udf.register("getParentDir",(arg:String)=>getParentDir(arg)) spark.udf.register("getExtention",(arg:String)=>getExtention(arg)) spark.udf.register("getSize",(arg:Long)=>getSize(arg)) spark.udf.register("buildID",(a1:String,a2:Long)=>buildID(a1,a2)) spark.udf.register("getUtec",(arg:String)=>getUtec(arg)) //val createParentDirColUDF = udf(createParentDirCol(_:DataFrame,_:String, _: String)) val createParentDirColUDF: UserDefinedFunction = spark.udf.register("createParentDirCol",(a1:DataFrame,a2:String,a3:String)=>createParentDirCol(a1,a2,a3)) /************************************************************************************************ * ******************************* Lesson20 GPFS Stats ********************************* * ******************************************************************************************/ /* In this lesson, we will learn how to use spark sql to analyse the statistic of a file system metric. The data * is the extraction (first 30000 lines) from a real file system. */ val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark") val path= sparkConfig.getString("sourceDataPath") val inputFile=s"${path}/spark_lessons/Lesson20_GPFS_Stats/gpfs_stats_sample.fist" val schema = StructType(Array( StructField("Blocks", LongType, true), StructField("Perms", LongType, true), StructField("nlinks", IntegerType, true), StructField("Uid", LongType, true), StructField("Gid", LongType, true), StructField("Size", LongType, true), StructField("Mtime", LongType, true), StructField("Atime", LongType, true), StructField("Ctime", LongType, true), StructField("Name", StringType, true))) val df = spark.read.format("com.databricks.spark.csv").option("delimiter", ":").schema(schema).load(inputFile) df.show(5) /************************************* 20.1 Data pre-processing *************************************/ val parsedDf = df.withColumn("DataType", expr("substring(Perms, 1, length(Perms)-4)")) .withColumn("ACL", expr("substring(Perms, length(Perms)-3,length(Perms))")).drop("Perms") .withColumn("FileName",expr("getFileName(Name)")) .withColumn("ParentDir",expr("getParentDir(Name)")).drop("Mtime").drop("Ctime") .withColumn("Extention",expr("getExtention(Name)")) .withColumn("FileID",expr("buildID(FileName,Size)")) .withColumn("Utec",expr("getUtec(Name)")) .withColumn("LastAccessTime",expr("getDate(Atime)")) parsedDf.show(5) parsedDf.cache() /*********************************** 20.2 Working with time *****************************/ /* We have several time functions in Spark: * - current_date: Gives current date as a date column. (e.g. 2019-05-23) * - current_timestamp: Gives current time as a time stamp column. (e.g. 
2019-05-23 10:37:19.585) * - date_format: It creates a Column with DateFormatClass binary expression. * - to_date(col(String))-> date: Converts column to date type(with an optional date format) * - to_timestamp: Converts column to timestamp type (with an optional timestamp format) * - unix_timestamp: Converts current or specified time to Unix timestamp (in seconds) * - window: Generates time windows(i.e. tumbling, sliding and delayed windows) * */ // workingWithTime(spark) /*********************************** 20.3 Finding duplicate *****************************/ // findingDuplicate(parsedDf,10000L) /************************************ 20.4 Basic stats **********************************/ // basicStats(parsedDf) /************************************* 20.5 Get all data not accessed since one year*******************/ getOneYearOldData(parsedDf) } /*********************************** 20.2 Working with time *****************************/ def workingWithTime(spark:SparkSession):Unit={ import spark.implicits._ /*************************************** 20.2.1 current_date *****************************/ val dateDf=spark.range(1).select(current_date().as("date")) dateDf.show(false) /*************************************** 20.2.2 current_timestamp *****************************/ val timeStampDf=spark.range(1).select(current_timestamp()) timeStampDf.show(false) /*************************************** 20.2.3 date_format *****************************/ val c=date_format($"date","dd/MM/yyyy") import org.apache.spark.sql.catalyst.expressions.DateFormatClass val dfc = c.expr.asInstanceOf[DateFormatClass] println(dfc.prettyName) println(dfc.numberedTreeString) //show only the year dateDf.select(date_format($"date","y")).show() //change the date with the defined date_format c. dateDf.select(c).show() /*************************************** 20.2.4 to_date *****************************/ val toDateDf=dateDf.select(to_date($"date","yyyy/MM/dd").as("to_date")) toDateDf.show(5) /************************************** 20.2.5 to_timestamp ************************/ val toTimestampDf=dateDf.select(to_timestamp($"date","dd/MM/yyyy").as("to_timestamp")) toTimestampDf.show(1) /*get year, month, day*/ val yearDf=toTimestampDf.select(year($"to_timestamp")).distinct() yearDf.show(1) val monthDf=toTimestampDf.select(month($"to_timestamp")).distinct() monthDf.show(1) val dayDf=toTimestampDf.select(dayofyear($"to_timestamp")).distinct() dayDf.show(1) /************************************ 20.2.6 unix_timestamp ***********************/ val unixTimeDf=dateDf.select(unix_timestamp($"date","MM/dd/yyyy").as("unix_timestamp")) unixTimeDf.show(1) //We can't use the function such as year, month, dayofyear with the digit of unix timestamp /*val yearDf=unixTimeDf.select(year($"unix_timestamp")).distinct() yearDf.show(1) val monthDf=unixTimeDf.select(month($"unix_timestamp")).distinct() monthDf.show(1) val dayDf=unixTimeDf.select(dayofyear($"unix_timestamp")).distinct() dayDf.show(1)*/ /************************************ 20.2.7 windows ***********************/ /* What are windows and what are they good for? * * Consider the example of a traffic sensor that counts every 15 seconds the number of vehicles passing a certain * location. The resulting stream could look like: * 9, 6, 8, 4, 7, 3, 8, 4, 2, 1, 3, 2(1st element in the stream) -> * * If you would like to know, how many vehicles passed that location, you would simply sum the individual counts. * However, the nature of a sensor stream is that it continuously produces data. 
Such a stream never ends and it * is not possible to compute a final sum that can be returned. Instead, it is possible to compute rolling sums, * i.e., return for each input event an updated sum record. This would yield a new stream of partial sums like: * 57, 48, 42, 34, 30, 23, 20, 12, 8, 6, 5, 2 * * However, a stream of partial sums might not be what we are looking for, because it constantly updates the * count and even more important, some information such as variation over time is lost. Hence, we might want to * rephrase our question and ask for the number of cars that pass the location every minute. This requires us to * group the elements of the stream into finite sets, each set corresponding to sixty seconds. This operation is * called a "tumbling windows operation". We can have a tumbling window like: * [9,6,8,4], [7,3,8,4], [2,1,3,2] -> for each window we have a sum 27, 22, 8. * * Tumbling windows discretize a stream into non-overlapping windows. For certain applications it is important * that windows are not disjunct because an application might require smoothed aggregates. For example, we can * compute every thirty seconds the number of cars passed in the last minute. Such windows are called "sliding windows". * * */ val levels = Seq( // (year, month, dayOfMonth, hour, minute, second) ((2012, 12, 12, 12, 12, 12), 5), ((2012, 12, 12, 12, 12, 14), 9), ((2012, 12, 12, 13, 13, 14), 4), ((2016, 8, 13, 0, 0, 0), 10), ((2017, 5, 27, 0, 0, 0), 15)). map { case ((yy, mm, dd, h, m, s), a) => (LocalDateTime.of(yy, mm, dd, h, m, s), a) }. map { case (ts, a) => (Timestamp.valueOf(ts), a) }. toDF("time", "level") levels.show(5) val timeWindow=levels.select(window($"time","5 seconds"),$"level") timeWindow.show(5,false) timeWindow.printSchema val sums=timeWindow.groupBy($"window") .agg(sum("level").as("level_sum")) .select($"window.start",$"window.end",$"level_sum") sums.show(false) } /*********************************** 20.3 Finding duplicate *****************************/ /** * This functions filter the data by its size first, then find duplicate of the filtered data. We use to column FileID * to distinguish the data, it's built based on the file name and size. So it's not very accurate way to detect * duplicated data. 
*/ def findingDuplicate(parsedDf:DataFrame,size:Long):Unit={ val spark=parsedDf.sparkSession import spark.implicits._ val duplicatedFile=parsedDf.filter($"Size">size) // only check files has a minimun size .groupBy($"FileID") // group files by their ID // After groupby, use aggregation function to calculate total size and count for each file .agg(sum("Size").alias("fileSize"), count($"FileID").alias("count")) .filter($"count">1) // select the file which has more than 1 count .orderBy($"fileSize".desc) // order the data frame by file size duplicatedFile.show(5,false) val duplicatedFileWithParentDir=duplicatedFile.join(parsedDf,Seq("FileID"),joinType = "inner") .select("FileID","fileSize","count","ParentDir","LastAccessTime").orderBy("FileID") duplicatedFileWithParentDir.show(5,false) } /******************************** 20.4 Basic Stats *************************************/ /** * In this function, we will do some basic stats such as */ def basicStats(parsedDf: DataFrame):Unit={ val spark:SparkSession=parsedDf.sparkSession import spark.implicits._ //Total fastq file count and size val allFastqs=parsedDf.filter($"Extention"==="fastq") // allFastqs.show(5) val allFastqsNum=allFastqs.count() println(s"All fastq file number count: ${allFastqsNum}") /* utec02 fastq*/ val utec02Fastq=allFastqs.filter($"Utec".startsWith("pt2")) val utec02AllFastqsNum=utec02Fastq.count() println(s"All fastq file number count of Utec 02: ${utec02AllFastqsNum}") /* Get all gpfs file size and fastq size. */ val allSize=parsedDf.agg(sum("Size")).first.get(0) val totalFastqSize=allFastqs.agg(sum("Size")).first.get(0) val utec02FastqSize=utec02Fastq.agg(sum("Size")).first.get(0) val HAllFastqSize=getSize(totalFastqSize.asInstanceOf[Number].longValue) val HAllSize=getSize(allSize.asInstanceOf[Number].longValue) val HUtec02FastqSize=getSize(utec02FastqSize.asInstanceOf[Number].longValue) println(s"All file size in GPFS: ${HAllSize}") println(s"All fastq file size in GPFS: ${HAllFastqSize}") println(s"All fastq file size of UTEC02 in GPFS: ${HUtec02FastqSize}") /* Get all file extention, and count total */ val distinctFileType=parsedDf.select("Extention").distinct().filter(length($"Extention")<10) distinctFileType.show(5,false) distinctFileType.count /* Count file number and size of each extention type*/ val fileTypeCount=parsedDf.filter(length($"Extention")<10).groupBy("Extention").count().orderBy($"count".desc) fileTypeCount.show(10) val fileTypeSortBySize=parsedDf.groupBy($"Extention").agg(expr("sum(Size) as TotalSize")).orderBy($"TotalSize".desc) .withColumn("HTotalSize",expr("getSize(TotalSize)")) fileTypeSortBySize.show(10) } /**************************** 20.5 Get all data not accessed since one year *************************/ def getOneYearOldData(parsedDF:DataFrame):Unit={ val spark:SparkSession=parsedDF.sparkSession import spark.implicits._ parsedDF.show(1) val allDataCount=parsedDF.count() /*There are two options based on our data set * 1. As we have the column Atime which is a unix timestamp with Long type, we can use the current time-oneYear unix * time length to get the last year max value. 
And all value < the max value is older than one year*/ // one year length in unix timestamp val oneYearLength=31536000L // current time val ctime:Long= Instant.now.getEpochSecond val oneYearMax=ctime-oneYearLength /* 1533686400 Is equivalent to 08/08/2018*/ /* test the value*/ println(s"ctime is ${ctime}, last year is ${oneYearMax}") // Filter all data older than one year max val oldDataDf=parsedDF.filter($"Atime"<oneYearMax) val oldDataCount=oldDataDf.count() println(s"Old data number count: $oldDataCount, all data count: $allDataCount") oldDataDf.show(5) /* 2. We can use the column LastAccessTime which has string format date, then we can convert the string to time * stamp. Then we use the build in function year, day of year to get day older than one year. * * For example, we have date in 2016, 2017,2018, 2019. The current date is 2019-08-08 (day of year is 220), * Step1, we know 2016, 2017 < 2019-1. So they must be older than one year * Step2 , for date in 2018, if dayofyear of the day < 220, we know it older than one year * * */ /*val dateDf=parsedDF.withColumn("timestamp",to_timestamp($"LastAccessTime","yyyy-MM-dd")) //dateDf.printSchema() dateDf.show(1,false) /* Step1. first we filter the date < current year-1*/ val currentYear=Year.now.getValue val oldData1=dateDf.filter(year($"timestamp")<currentYear-1) val old1Count=oldData1.count() /* Step2. We filter teh the date = current_year-1 and dayofYear > current dayofYear*/ val currentDayofYear=Calendar.getInstance.get(Calendar.DAY_OF_YEAR) println(s"Current day of year: ${currentDayofYear}") val oldData2=dateDf.filter((year($"timestamp")===currentYear) && (dayofyear($"timestamp")<100)) val oldAllData=oldData1.union(oldData2) val old2Count=oldData2.count() val oldDataCount=oldAllData.count() println(s"Old data number count: $oldDataCount, all data count: $allDataCount") println(s"Old1 count: $old1Count, Old2 count: $old2Count") oldAllData.show(1)*/ } /**************Helper class for curate raw data to the new data frame ****************************/ def createParentDirCol(fullDf:DataFrame,colName:String,colVaule:String):List[String]={ val spark=fullDf.sparkSession import spark.implicits._ fullDf.filter(col(colName)===colVaule).select("ParentDir").map(_.getString(0)).collect.toList } def getSize(rawSize:Long): String ={ val unit:Array[String]=Array("B","KB","MB","GB","TB") var index=0 var tmpSize:Long=rawSize while(tmpSize>=1024){ tmpSize=tmpSize/1024 index+=1 } return tmpSize+unit(index) } def getFileName(fullPath:String):String={ val fileName=fullPath.substring(fullPath.lastIndexOf("/")+1) return fileName } def getParentDir(fullPath:String):String={ val index=fullPath.lastIndexOf("/") if(index>0&&index<fullPath.length){ val parentDir=fullPath.substring(0,fullPath.lastIndexOf("/")) return parentDir} else return "None" } def getDateInMillis(date:String):Long={ val format=new java.text.SimpleDateFormat("m/dd/yyyy") val time=format.parse(date).getTime()/1000 return time } def getDate(rawDate:Long):String={ val timeInMillis = System.currentTimeMillis() val instant = Instant.ofEpochSecond(rawDate) val zonedDateTimeUtc= ZonedDateTime.ofInstant(instant,ZoneId.of("UTC")) val zonedDateTimeCet=ZonedDateTime.ofInstant(instant,ZoneId.of("CET")) zonedDateTimeUtc.toString } def getExtention(fileName:String):String={ val index=fileName.lastIndexOf(".") if(index>0)return fileName.substring(index+1) else return "None" } def buildID(fileName:String,Size:Long):String={ return fileName.concat(Size.toString) } def getUtec(fullPath:String):String={ 
if(fullPath.contains("pt")&&fullPath.length>3){ return fullPath.substring(0,3) } else return "Others" } }
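// A standalone check of the getSize helper defined in Lesson20_GPFS_Stats above (the demo object
// name and the byte values are illustrative assumptions). getSize walks up the unit array through
// repeated integer division by 1024, so fractional parts are truncated: 1536 bytes prints as
// "1KB", not "1.5KB".
object Lesson20_GetSizeDemo {
  def main(args: Array[String]): Unit = {
    println(Lesson20_GPFS_Stats.getSize(512L))                    // 512B
    println(Lesson20_GPFS_Stats.getSize(1536L))                   // 1KB (truncated from 1.5KB)
    println(Lesson20_GPFS_Stats.getSize(1048576L))                // 1MB
    println(Lesson20_GPFS_Stats.getSize(5L * 1024 * 1024 * 1024)) // 5GB
  }
}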
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson17_Analyze_Clinical_Data/Lesson17_Analyze_Clinical_Data.scala
<reponame>pengfei99/Spark<gh_stars>0 package org.pengfei.Lesson17_Analyze_Clinical_Data import com.typesafe.config.ConfigFactory import org.apache.log4j.{Level, Logger} import org.apache.spark.broadcast.Broadcast import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession} import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.StringType import scala.collection.immutable.ListMap object Lesson17_Analyze_Clinical_Data { /************************************************************************************************************ * ***************************** 17.1 Introduction ********************************************************* * *********************************************************************************************************/ /* In this Lesson, we will learn how to transform a dataset into a specific format. The raw dataset is provided by * a hospital study, we need to transform it with a specific format and load it into a bio data warehouse named * transmart. Transmart is not a real data warehouse in a computer scientist opinion but its close enough. * In this lesson, we will learn: * - 2. read data from excel * - 3. preliminary analyze on raw data * - 4. build new columns based on duplicate rows, * - 5. deal with duplicates rows/null values/change column names * - 6. Merge columns * - 7. Joining data * - 8. Compare two columns if they have the same value for each row * - 9. Other helping function * */ val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark") val path= sparkConfig.getString("sourceDataPath") /******************************************* Configuration ***************************************/ val csvFile=s"${path}/spark_lessons/Lesson17_Analyse_Clinical_Data/raw_data.csv" val outputPath=s"${path}/spark_lessons/Lesson17_Analyse_Clinical_Data" val nullValue="null" val timePointColName="Time_Point" val patientIdColName="Patient" val separator="_" val v1="v1" val v2="v2" //config for output csv to match with transmart requirements val studyID="Realism01" val subjID="SUBJ_ID" val outputCsvDelimiter="\t" def main(args:Array[String]):Unit= { Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder().appName("Lesson17_Analyze_Clinical_Data").master("local[2]").getOrCreate() import spark.implicits._ /************************************************************************************************************ * ***************************** 17.2 Read data from excel ********************************************************* * *********************************************************************************************************/ /* Here we use a 3rd party lib to read excel data, you can find the maven dependence * <dependency> <groupId>com.crealytics</groupId> <artifactId>spark-excel_2.11</artifactId> <version>0.10.1</version> </dependency> * * But we have problem with encodings, the excel by default does not use utf8 as encoding, so the special character * will not be printed normally. 
To resolve this encoding problem, we use lib-office to export a csv with utf-8 * */ /*val filePath = "/DATA/data_set/spark/basics/Lesson17_Analyse_Clinical_Data/raw_data.xlsx" val df = spark.read .format("com.crealytics.spark.excel") .option("sheetName", "workable_long_missing_type") // Required .option("useHeader", "true") // Required .option("treatEmptyValuesAsNulls", "true") // Optional, default: true .option("inferSchema", "true") // Optional, default: false .load(filePath)*/ /* val filePath = "/DATA/data_set/spark/basics/Lesson17_Analyse_Clinical_Data/sample.xlsx" val df = spark.read .format("com.crealytics.spark.excel") .option("sheetName", "Feuil1") // Required .option("useHeader", "true") // Required .option("treatEmptyValuesAsNulls", "true") // Optional, default: true .option("inferSchema", "true") // Optional, default: false .load(filePath) df.show(1) val columNum=df.columns.length println(s"columNum is $columNum") */ /****************************************************************************************************** * ************************************17.3 Preliminary analyze on raw data ***************************** * ******************************************************************************************************/ /* Before we start the transformation, we need to understand the data. */ val csvDF=spark.read.option("inferSchema", true).option("header",true) .option("nullValue"," ") .option("encoding", "UTF-8") .option("delimiter",",").csv(csvFile) csvDF.cache() //csvDF.show(1) /**********************get raw data size (i.e. rows and columns)************************/ /*val columnNum=csvDF.columns.length val rowNum=csvDF.count() println(s"csv column Num is $columnNum, row number is $rowNum")*/ /* csv column Num is 470, row number is 117124 */ //csvDF.printSchema() /*********************** get rows of each patient and all possible row numbers*********************/ /* With the below code, we know we have multiple rows for one patient, but the target warehouse only allow one * row for each distinct patient, now we need to know why we have multiple rows.*/ val patientRowCount=csvDF.groupBy("Patient").count() //patientRowCount.select("count").distinct().show(10) /* all possible patient row number | 188| | 94| | 235| | 141| | 329| | 282| | 47|*/ /****************** Discover the data is time related *********************************/ /* We find the column TP_Class describes the date of when the data is recorded, so we can conclude that the data * is time related*/ val timePointValues=csvDF.select("TP_Class").distinct().orderBy($"TP_Class".asc) // timePointValues.show() /* * D28| | D5-D7 | D60| | HV| | D1-D2| | D1| | D3-D4| | D2| | D0| | D14| * */ /*********************************************************************************************************** * ****************** 17.4 Build new columns based on duplicate rows of patients ************************** * ******************************************************************************************************/ /*************************************17.4.1 Change date value for easier sorting *************************/ /* We noticed that with the current time point column, when we do sorting, the value does not sort in good order * So we need to change the value as shown below * */ /* | D0->Day 00| | D1->Day 01| | D1-D2->Day 01-Day 02| | D14->Day 14| | D2->Day 02| | D28->Day 28| | D3-D4->Day 03-Day 04| | D5-D7->Day 05-Day 07| | D60->Day 60| | HV->Day 00| * */ /* The detail of the implementation is encapsulate in the function 
ModifyTimePoint */ val dfWithTP= ModifyTimePoint(csvDF) /************************ 17.4.2 Build column Based on the patient time point row ***********************/ /* As our raw data has multiple rows on a patient, each row represent specific data collect at a specific time point. * We have two different scenarios : * Scenario 1. We don't have the explicite column name, we need to build column name for each value * for example, we have Patient_id | Time_Point | Coag_Sofa_Score * 1004 * |-- D0 * |- v1 * * |-- D1 * |- v2 * All the rows in which columns such as age, sex, etc will have duplicate data for patient 1004 * To eliminate all duplicate data and make data more easier to load into the data warehouse, we need to * transform all the rows into columns * For example, the new dataframe should looks like * Patient_id | D0_Coag_Sofa | D1_Coag_Sofa_Score * 1004 | v1 | v2 * * Scenario 2. We have column name in the row, for example * Patient_id | Time_Point | marker_name | marker_value * 1004 * |-- D0 * |- n1 | v1 * |- n2 | v2 * |- ... * |-- D1 * |- n1 | v1 * * The output must be * * * Patient_id | D0_n1 | D0_n2 | ... | D1_d1n1 | ... * 1004 | v1 | V2 | ... | d1v1 * * * */ /*********************************************************************************************************** * ************************** 17.4.3 SOFA time point related data treatment *************************** * ******************************************************************************************************/ /* SOFA data is in scenario 1 */ val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA", "CBD_Dobut_SOFA","CBD_Hepat_SOFA", "CBD_Neuro_SOFA","CBD_Renal_SOFA", "CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA") val utilityColumns=Array(patientIdColName,timePointColName) /* // build a small test dataset to test the correctness of the function val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA") val patient1088=dfWithTP.filter($"Patient"===1088) val sofaTest=BuildColumnsWithTimePointS1(patient1088,sofaValueColumns,utilityColumns) val sofaTestRowNum=sofaTest.count() val sofaTestColNum=sofaTest.columns.length println(s"sofa row number is ${sofaTestRowNum}, sofa column number is ${sofaTestColNum}") */ /* The implementation of how we transform duplicate rows into columns is in function BuildColumnsWithTimePointS1, * and this function is strongly depends on function rowToColumn. 
The rowToColumn is the core function which can * transform duplicate rows of a column into multiple columns.*/ /* val sofaTPData=BuildColumnsWithTimePointS1(dfWithTP,sofaValueColumns,utilityColumns) val sofaRowNum=sofaTPData.count() val sofaColNum=sofaTPData.columns.length*/ // sofa row number is 552, sofa column number is 82 // println(s"sofa row number is ${sofaRowNum}, sofa column number is ${sofaColNum}") /*********************************************************************************************************** * ************************** 17.4.4 BioMarker time point related data treatment *************************** * ******************************************************************************************************/ /* BioMarker data is in scenario 2*/ /* raw dataframe +-------+----------+---------------+--------+----------------+------------------+-------------+ |Patient|Time_Point| Platform| Marker| Value|Missing_Value_Type|Imputed_Value| +-------+----------+---------------+--------+----------------+------------------+-------------+ | 5001| D00| ELLA_EDTA| IL6| 1,964| null| 0| | 5001| D00|ELLA_TRUCULTURE| IL2_NUL| null| OOR<| 0| result dataframe (build 3 new column) Column name rules: platform-BioMarker-TimePoint-(value|Missing_Value_Type|Imputed_Value) +-------+--------------------------+ |Patient| ELLA_EDTA-IL6-D00-value| ELLA_EDTA-IL6-D00-Missing_Value_Type | ELLA_EDTA-IL6-D01-Imputed_Value +-------+--------------------------+ | 5001| 1,964| null | 0(0->false, 1->true) -Biomarker |--Platform |--ELLA_EDTA |--IL6 |--D0 | value | Missing_Value_Type | Imputed_Value |--D1 |--... |--IL10 |--ELLA_TRUCULTURE |--TNFa_LPS |--... So the total column_number= distinct(platform+marker)*timePoint*3 */ /*// build a small test dataset val patient1088=dfWithTP.filter($"Patient"===1088) //tested with different platform sub dataset, for example, ELLA_EDTA only has two rows for patient 1088 val markerTestDf=patient1088.filter($"Platform"==="ELLA_TRUCULTURE") markerTestDf.show(5,false) val test=BuildColumnsWithTimePointS2(markerTestDf) test.show(5, false)*/ /* Test with Cytometry Tcells dataset*/ /*Step 0: build columns with Time point */ val tCellsCols= Array("Treg_Percentage","Treg_cells_per_µl","T_cells_percentage","T_cells_per_µl","T4_cells_percentage", "T4_cells_per_µl","T8_cells_percentage","T8_cells_per_µl","Ratio_T4_T8_percentage","T4_T8_cells_percentage") val fullSortedTCellsCols=generateFullCols("FLOW_CYTOMETRY",tCellsCols) val dfCyto=dfWithTP.filter(col("Plateform")==="FLOW_CYTOMETRY") val dfTCells=dfCyto.filter(col("Marker").isin(tCellsCols: _*)) val tCellsTPRawData=BuildColumnsWithTimePointS2(dfTCells) tCellsTPRawData.count() tCellsTPRawData.columns.size /* Step 1: normalize data for transmart format*/ val tCellsData=NormalizeColNameForTransmart(tCellsTPRawData, fullSortedTCellsCols) //neutroData.show(5) /*Step2 : change col name*/ /*Step3 : check null cell count for each column*/ /* Step 4 : fill null value with transmart required value*/ /* Step 5: Write data to disk*/ // WriteDataToDisk(tCellsData,"hdfs://hadoop-nn.bioaster.org:9000/realism/output","FC_TCells_V3") /* Step 6 : Get column name number mapping */ val tCellsColNameNumMapping=getColumnNumNameMapping(tCellsData) tCellsColNameNumMapping.foreach(println) /* // The implementation of bioMarker transformation is done in BuildColumnsWithTimePointS2, it's also strongly depends on // the rowToColumn function val bioMarkerWithTPColData=BuildColumnsWithTimePointS2(dfWithTP) bioMarkerWithTPColData.show(5,false) val 
rowNum=bioMarkerWithTPColData.count() val colNum=bioMarkerWithTPColData.columns.length // The row number is 552, the column number is 1270, the row number is 552 which is correct of the total patient number println(s"The row number is ${rowNum}, the column number is ${colNum}") */ /************************************************************************************************************* ************* 17.5 Deal with null value cell/ remove duplicate columns/ change column name ****************** ********************************************************************************************************/ /* To see how we deal with null value, duplicate columns, change column name, we use a sub-dataframe which includes * all demographic data of the patient. * * The implementation is done in the function ExportDemographicData. This function calls: * - changeColName * - countNullValue * - fillTransmartNullForDigitCol * - fillTransmartNullForStrCol * - replaceSpecValue * * */ // ExportDemographicData(csvDF) /*********************************************************************************************************** * ******************************************* 17.6 Merge column ****************************************** * ******************************************************************************************************/ /* As we explained before, the raw data is built based on two study version, v1 does not support time point, v2 * supports the time point, The v1 column name contains Theoretical_D1 (e.g. CBD_Cardio_SOFA_Theoretical_D1). The * v2 column names are without Theoretical_D1 (e.g. CBD_Cardio_SOFA). There is another column which indicates the * time point. We want to merge the v1 column with v2 of day 01 column. * * The implementation is done in function ExportMergedSofa, it can be divide into three main steps: * - Build sofa v1 dataframe which contains all sofa v1 columns * - Build sofa v2 dataframe, we need to transform the duplicated row to column with time point. * - Merge sofa v1 and v2 columns, this steps calls function mergeSofaColumns, which is the core function of merge. * */ //val fullSofaDf=ExportMergedSofa(dfWithTP) /*********************************************************************************************************** * ******************************* 17.7 Joining data ***************************************************** * ******************************************************************************************************/ /* * As we merged the sofa score of v1 and v2, we need to build a new column for indicating the data's study version * The REALISM_patient_list.csv contains two column, Patient v1 column contains all patient id of version1. * Patient v2 column contains all patient id of version 2. 
*/ val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark") val path= sparkConfig.getString("sourceDataPath") val patientListDf=spark.read.option("inferSchema", true).option("header",true) .option("nullValue"," ") .option("encoding", "UTF-8") .option("delimiter",",") .csv(s"${path}/Lesson17_Analyse_Clinical_Data/REALISM_patient_list.csv") //patientListDf.show(5) ExportPartientStudyVersion(patientListDf,csvDF) /*********************************************************************************************************** * ******************************************* 17.8 Compare two columns ******************************** * ******************************************************************************************************/ /*********************************************************************************************************** * ******************************************* 17.9 Other helping function ******************************** * ******************************************************************************************************/ /* We also developed a few helping function which can the export to transmart much easier. You can find them * in the following list: * - NormalizedColNameForTransmart * - getDistinctValueOfColumns (reusable in any dataframe) * - removeRowsWithSpecValues (reusable in any dataframe) * - getColumnNumNameMapping (reusable in any dataframe) * - WriteDataToDisk * - GetStatsOfEachSubGroup * * The function with Capital letter have dependencies(e.g. column names) with this data, so it can't be used in * other data directly. * */ } /** * This function get the patient study version of Realism, if the given column belongs to v2, patient of v1 must * have value null, if not null, patient belongs v2 * @author <NAME> * @version 1.0 * @since 2019-02-13 * @param patientListDf is a dataframe built from REALISM_patient_list.csv * @param df is the main source dataframe where we get all the study data * @return DataFrame * */ def ExportPartientStudyVersion(patientListDf:DataFrame,df:DataFrame):DataFrame={ /* Step1: build a dataframe for Patient v1 which contains patientId, and study_version which contains value only * for patient of v1, */ // select column Patient v1 and drop null rows val patientV1=patientListDf.select("Patient v1").distinct().na.drop() //val patient_v1_count=patientV1.count() val patientV1Df=patientV1.withColumnRenamed("Patient v1","Patient") .withColumn("Study_version",lit(v1)) /* Step2: build a dataframe for Patient v2 which contains patientId, and study_version which contains value only * for patient of v2, */ val patientV2=patientListDf.select("Patient v2").distinct().na.drop() val patientV2Df=patientV2.withColumnRenamed("Patient v2","Patient") .withColumn("Study_version",lit(v2)) //val patient_v2_count=patientV2.count() // println(s"V1 has ${patient_v1_count} patients, V2 has ${patient_v2_count} patients, In total: ${patient_v1_count+patient_v2_count}") /* Step3: Union df p1 and p2,*/ val patientVersionFullDf=patientV1Df.union(patientV2Df) /* Step4: build a dataframe for subgroup*/ val patientSubGroupDf=df.select("Patient","Subgroup").dropDuplicates(); /* Step5: Join the two dataframe*/ val patientVersionGroupDf=patientSubGroupDf.join(patientVersionFullDf,Seq(patientIdColName),joinType = "inner") patientVersionGroupDf.show(5,false) /* Step6: count null value*/ //countNullValue(patientVersionGroupDf) /* Step7: replace null*/ /* no null value found */ /* Step8: normalize data for transmart format */ val 
versionGroupColumns=Array("Subgroup","Study_version"); val patientVersionGroupFinalDf=NormalizeColNameForTransmart(patientVersionGroupDf,versionGroupColumns) patientVersionGroupFinalDf.show(5,false) /* Step9: output data to disk */ WriteDataToDisk(patientVersionGroupFinalDf,"/tmp/Realism","version_group_data") /************************* Annexe ********************/ /* We find out that, the patient list file has 553 patient, and the source data file has 552 patient. The following * code find out which patient in patient list is not present in the source data file. The patient 1006 refused to * publish his data, so he is in the patient list but not in the source data file. So we need to remove it from * patientVersionFullDf*/ /*val patientSource=df.select("Patient") val patientNotInSource=patientVersionFullDf.select("Patient").except(patientSource) patientNotInSource.show()*/ return df } /************************************************************************************************************* ************************************ 17.4 Function Implementation ************************************* **********************************************************************************************************/ /*********************************** 17.4.1 Change date value for easier sorting *******************************/ def ModifyTimePoint(df:DataFrame):DataFrame={ val spark=df.sparkSession spark.udf.register("changeTimePoint",(timePoint:String)=>changeTimePoint(timePoint)) val dfWithNewTimePoint=df.withColumn("Time_Point",expr("changeTimePoint(TP_Class)")) dfWithNewTimePoint.select("TP_Class","Time_Point").distinct().show(10) /* //The following code write the result dataframe on disk dfWithNewTimePoint.coalesce(1).write.mode(SaveMode.Overwrite) .option("header","true") .option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating of crc files .option("encoding", "UTF-8") .csv(outputPath+"/TimePoint") */ return dfWithNewTimePoint } def changeTimePoint(timePoint:String):String={ timePoint match { case "D0" => "D00" case "D1" => "D01" case "D1-D2" => "D01-D02" case "D2" => "D02" case "D3-D4" => "D03-D04" case "D5-D7" => "D05-D07" case "D14" => "D14" case "D28" => "D28" case "D60" => "D60" case "HV" => "D00" case _=>"null" } } /*********************************** 17.4.3 SOFA time point related data treatment *******************************/ /** * This method transform the raw data of scenario 1 to column with time point * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame in which the transformation will take place. * @param valueColumns allColumns is a list of String which contains all the column name * the name of the newly created column name. * @param utilityColumns utilityColumns * become a new row in the corresponding filed column * @return a data frame which contains the result of the transformation * */ def BuildColumnsWithTimePointS1(df:DataFrame,valueColumns:Array[String],utilityColumns:Array[String]):DataFrame={ /* change row to column with time_point in consideration * here is the origin data frame * +-------+----------+--------------- |Patient|Time_Point|CBD_Cardio_SOFA| +-------+----------+---------------+ | 1004| D03-D04| 1| | 1004| D05-D07| 1| | 1004| D28| null| | 1004| D01-D02| 4| | 1007| ... * * Step.1 . 
Build filedId column * |Patient|CBD_Cardio_SOFA_ID|CBD_Cardio_SOFA_Value| +-------+----------+---------------+ | 1004| CBD_Cardio_SOFA.D03-D04| 1 | | 1004| CBD_Cardio_SOFA.D05-D07| 1 | | 1004| CBD_Cardio_SOFA.D28| null| | 1004| CBD_Cardio_SOFA.D01-D02| 4 | | 1007| ... * */ val spark=df.sparkSession import spark.implicits._ /*Step1. if filedId column does not exit, create filedId column */ //Get all filed value column name val allColumns=valueColumns.union(utilityColumns) println(s"allColumns ${allColumns.mkString(";")}") //Get all time point val allColumnData=df.select(allColumns.head,allColumns.tail:_*).dropDuplicates().orderBy(asc(patientIdColName)) allColumnData.show(10) /* no need to get the array of timePoint val timePoint=sofa_TPData.select("TP_Class").distinct().collect().map(_.get(0))*/ var tmp=allColumnData for(valueColumn<-valueColumns){ tmp=tmp.withColumn("tmp",lit(valueColumn)) /* do not put . in the column name, spark will think you want to access an attribute of the columne*/ .withColumn(valueColumn+"_Id",concat($"tmp",lit(separator),col(timePointColName))) .drop("tmp") } tmp.show(10) // tmp.printSchema() /* Here we need to loop over all elements in value column,*/ var result=tmp.select(patientIdColName).distinct().sort(asc(patientIdColName)) result.show(5) for(filedValueColumn<-valueColumns){ val filedColumnId=filedValueColumn+"_Id" val inter=RowToColumn(tmp,patientIdColName,filedColumnId,filedValueColumn) result=result.join(inter,Seq(patientIdColName),"inner") result.show(10) } return result } /****************** 17.4.4 BioMarker time point related data treatment *******************/ /** * This method transform the raw data of bioMarker Value/Missing_Value_Type/Imputed_Value to column with platform * name, marker name and time point * * @author <NAME> * @version 1.0 * @since 2018-12-28 * @param rawDf The source data frame in which the transformation will take place. * @return a data frame which contains the result of the transformation * */ def BuildColumnsWithTimePointS2(rawDf:DataFrame):DataFrame={ val spark=rawDf.sparkSession import spark.implicits._ /* In our case, the column which we want to transform are fixed, and we only called it once, so no need to set in * the config. 
*/ val bioMarkerValueCol=Array("Value","Missing_Value_Type","Imputed_Value") val bioMarkerFiledIdCol=Array("Platform","Marker") val bioMarkerUtilityCol=Array(patientIdColName,timePointColName) val bioMarkerCol=bioMarkerUtilityCol.union(bioMarkerFiledIdCol).union(bioMarkerValueCol) /* //check the bioMarker data schema and if data contains many null bioMarkerData.printSchema() val allValue=bioMarkerData.count() val nonNullValue=bioMarkerData.filter($"Value".isNotNull).count() println(s"All value count is ${allValue}, nonNullValue count is ${nonNullValue}") */ /* val allPlateformMarkerTP=bioMarkerDataWith3FiledIdName.select("marker_tp").distinct().collect().toArray // All possible platform biomarker and time point combination number is 423, so we will have 423*3 more columns println(s"All possible platform biomarker and time point combination ${allPlateformMarkerTP.length}")*/ /*Step 0: clean the raw dataset, get only biomarker related columns and fill the null value with string "null"*/ val bioMarkerData=rawDf.select(bioMarkerCol.head,bioMarkerCol.tail:_*).dropDuplicates().orderBy(asc(patientIdColName)) val df=bioMarkerData.na.fill(nullValue) df.show(5) val bioMarkerDataWith3FiledIdName=df.withColumn("tmp",concat(col(bioMarkerFiledIdCol(0)),lit(separator),col(bioMarkerFiledIdCol(1)))) .withColumn("marker_tp",concat($"tmp",lit(separator),col(timePointColName))) .drop("tmp") .withColumn("marker_Value",concat($"marker_tp",lit("/Value"))) .withColumn("marker_Missing_Value_Type",concat($"marker_tp",lit("/Missing_Value_Type"))) .withColumn("marker_Imputed_Value",concat($"marker_tp",lit("/Imputed_Value"))) bioMarkerDataWith3FiledIdName.show(5,false) var result=df.select(patientIdColName).distinct().sort(asc(patientIdColName)) for(filedValueColName<-bioMarkerValueCol){ println(s"Current working column name : ${filedValueColName}") val filedIdColName="marker_"+filedValueColName val inter=RowToColumn(bioMarkerDataWith3FiledIdName,patientIdColName,filedIdColName,filedValueColName) result=result.join(inter,Seq(patientIdColName),"inner") } result.show(1, false) //Sort the output column so the Value/Missing_Value_Type/Imputed_Value of a marker are together val sortedColumnName=Array("Patient")++result.columns.sorted.filter(!_.equals("Patient")) println(s"The sorted Column Name is ${sortedColumnName.mkString(";")}") result=result.select(sortedColumnName.head,sortedColumnName.tail:_*) return result } /*************************************17.4 Core function rowToColumn *******************************************/ /** * This method transform multi rows of an object into columns, after the transformation, for each object we only have * one row in the data frame. To make the transformation, this method cast all column to type string, as we don't do * arthimetic operations here. So it won't be a problem. You can change the type back to Int, or double after the * transformation. * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param rawDf The source data frame in which the transformation will take place. * @param targetIdColumnName The column in the data frame which contains the name of the filed. Each row will become * the name of the newly created column name. * @param targetValueColumnName The column in the data frame which contains the value of the filed. 
Each row will * become a new row in the corresponding filed column * @return a data frame which contains the result of the transformation * * */ def RowToColumn(rawDf: DataFrame, objectIdColumnName:String,targetIdColumnName: String,targetValueColumnName:String): DataFrame = { val spark = rawDf.sparkSession import spark.implicits._ /* Step0. Eliminate all null rows, it may create a null dataframe (dataframe without rows), we can't build columns * with no rows, so we need to fill null with a null value which will not cause null pointer exception. * As a result, we cast all columns to string type and fill the null cell with pre-defined nullValue(String)*/ val df = rawDf.select(rawDf.columns.map(c => col(c).cast(StringType)) : _*).na.fill(nullValue) /* Step1. Get all possible filedIDs, which will be become the column name of each filed */ val filedIDs = df.select(targetIdColumnName).distinct().orderBy(df(targetIdColumnName).asc) filedIDs.show(10,false) // convert the column in the data frame which contains the filed Ids to an Array of the filed Ids. val filedIDsArray: Array[String] = filedIDs.collect().map(_.get(0).toString) /* Step2. Build the (filedId,filedValue) <key,value> map for each row. */ /* We have two solutions to do this. * Solution 1 : build a user define function which build a map * Solution 2 : Spark provide map function which can build a map based on two columns * Here we choose Solution 2 , spark native function is always better than udf.*/ // Solution 1: If we don't fill null value before, here we need to use Option type to avoid null pointer /*def buildFiledMap(filedName:String,filedValue:String):Map[String,Option[String]]={ if(filedValue.isEmpty) Map(filedName->None) else Map(filedName->Option(filedValue)) } spark.udf.register("buildFiledMap",(arg1:String,arg2:String)=>buildFiledMap(arg1,arg2)) val filedIdValueMap=df.withColumn("filed_map",expr(s"buildFiledMap(${targetIdColumnName},${targetValueColumnName})"))*/ /* def buildFiledMap(filedName:String,filedValue:String):Map[String,String]={ if(filedValue.isEmpty) Map(filedName->"null") else Map(filedName->filedValue) } spark.udf.register("buildFiledMap",(arg1:String,arg2:String)=>buildFiledMap(arg1,arg2)) val filedIdValueMap=df.withColumn("filed_map",expr(s"buildFiledMap(${targetIdColumnName},${targetValueColumnName})")) */ /* Solution 2 : The spark native map function * The map function by default does not deal with null value, so if we have null value in the two columns you will * have x->, or ->y, when you have functions to call these null values, you will have null pointer exception. * The solution is to fill the null value with a string "null", **/ val filedIdValueMap = df.withColumn("filed_map", map(df(targetIdColumnName), df(targetValueColumnName))) filedIdValueMap.show(5,false) /* Step3. Group the (filedId,filedValue) map for each distinct subject which may have multiple rows. Each row has * a map. After group, we concatenate all maps of a subject into one single map. Here, we used collect_list, there is * another similar function collect_set, which list returns an ordered sequence of elements, set returns an unordered * distinct list of elements, we know that, we will not have duplicate filedId for one subject. 
so we don't need to use * set, we prefer to use list.*/ val groupedFiledIdValueMap = filedIdValueMap.groupBy(objectIdColumnName) .agg(collect_list("filed_map")) // return a list of map .as[(String, Seq[Map[String, String]])] // <-- leave Rows for typed pairs .map { case (id, list) => (id, list.reduce(_ ++ _)) } // <-- concatenate all maps to a single map // the reduce(_ ++ _) translates to reduce((a,b)=>a++b) where a, b are lists, ++ is a method in list interface // which concatenates list b to a. .toDF(objectIdColumnName, "filed_map") groupedFiledIdValueMap.show(10, false) /* Step 4. Create column for each fieldId based on the complete fieldId list, with the getFiledValue function, * */ val bFiledIDsArray: Broadcast[Array[String]] = spark.sparkContext.broadcast(filedIDsArray) def getFiledValue(filedId: String, filedMap: Map[String, String]): String = { //you can replace the empty (null) value as you want, here I tried empty string "", "null" and "." if(filedMap.isEmpty||filedId.isEmpty){nullValue} else { filedMap.getOrElse(filedId, nullValue) } } //spark.udf.register("getFiledValue", (arg1: String, arg2: Map[String, String]) => getFiledValue(arg1, arg2)) spark.udf.register("getFiledValue", getFiledValue(_:String, _: Map[String, String])) var tmpDf = groupedFiledIdValueMap (0 until bFiledIDsArray.value.length).map { i => val filedId: String = bFiledIDsArray.value(i) tmpDf = tmpDf.withColumn("current_id", lit(filedId)) .withColumn(filedId, expr("getFiledValue(current_id,filed_map)")) .drop("current_id") // The solution which takes a variable and a column does not work, because, the udf only allows column type as argument // //tmpDf=tmpDf.withColumn(filedId,getFiledValue(filedId,filed_map))) } val result=tmpDf.drop("filed_map") result.show(5,false) result } /*********************************************************************************************************** * *************** 17.5 Deal with null value cell/ duplicate columns/ change column name ********************** * ******************************************************************************************************/ def ExportDemographicData(df:DataFrame):Unit={ val spark=df.sparkSession; import spark.implicits._ // prepare demographicColumns, as demographicColumns are not time point related, so with drop duplicates, we get one // row per patient val demographicColumns=Array("Patient","Subgroup","DD_Gender","DD_Calculated_Age","DD_Height","DD_Weight","DD_BMI") val demographicData=df.select(demographicColumns.head, demographicColumns.tail: _*).dropDuplicates().orderBy($"Patient".asc) demographicData.show(10) // column rename map val nameMap=Map(("DD_Gender","Sex"),("DD_Calculated_Age","Age"),("DD_Height","Height"),("DD_Weight","Weight"),("DD_BMI","BMI")) /* Step 1 : normalize data for transmart format*/ val demoForTransmart=NormalizeColNameForTransmart(demographicData,demographicColumns) demoForTransmart.show(10,false) /* Step 2 : change column name*/ val demoRenamedDf=ChangeColName(demoForTransmart,nameMap) demoRenamedDf.show(10,false) /* Step 3 : check null value */ countNullValue(demoRenamedDf) /* Step 4 : fill null value with transmart required value (. for digit, Not Available for string)*/ /* We know Height, Weight, BMI are all digit columns, so we replace them with . 
* */ val demoFinalData=fillTransmartNullForDigitCol(demoRenamedDf,Array("Height","Weight","BMI"),nullValue) countNullValue(demoFinalData) /* Step 5 : output data to disk */ WriteDataToDisk(demoFinalData,"/tmp/Realism","demographic_data") } /** * This function takes a data frame,and a Map[oldColName,newColName], it will replace the old column name by the * new column name and returns the data frame with new names. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @param nameMap A Map of [oldColName,newColName] * @return DataFrame * */ def ChangeColName(df:DataFrame,nameMap:Map[String,String]):DataFrame={ val oldNames=nameMap.keySet.toArray var result=df for(oldName<-oldNames){ result=result.withColumnRenamed(oldName,nameMap.getOrElse(oldName,"No_keys")) } return result } /** * This function takes a data frame, it prints null value counts of all columns of the data frame * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @return Unit * */ def countNullValue(df:DataFrame):Unit={ val spark=df.sparkSession import spark.implicits._ for(colName<-df.columns){ val nullCount=df.filter(df(colName).isNull||df(colName).isNaN||df(colName)===""||df(colName)===nullValue).count() println(s"The null value count of the column $colName is $nullCount") } } /** * This function takes a data frame, a list of column names, and a user defined null value, it will replace the * default null (df.na) and user define null value in the data frame by the transmart digit null value in all * given columns of the data frame. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param rawDf The source data frame. * @param colNames A list of column names * @param userDefinedNull A string value which is defined by user in the data frame to represent null. * @return DataFrame * */ def fillTransmartNullForDigitCol(rawDf:DataFrame,colNames:Array[String],userDefinedNull:String):DataFrame={ val digitNull="." /*Step 0 : cast all column to string*/ val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*) //df.show(5) /*Step 1 : fill na with digitNull to the given column*/ val naFill=df.na.fill(digitNull,colNames) //naFill.show(5) /*Step 2: fill user defined null with digitNull*/ val result=replaceSpecValue(naFill,colNames,userDefinedNull,digitNull) result } /** * This function takes a data frame, a list of column names, and a user defined null value, it will replace the * default null (df.na) and user define null value in the data frame by the transmart String null value in all * given columns of the data frame. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param rawDf The source data frame. * @param colNames A list of column names * @param userDefinedNull A string value which is defined by user in the data frame to represent null. * @return DataFrame * */ def fillTransmartNullForStrCol(rawDf:DataFrame,colNames:Array[String],userDefinedNull:String):DataFrame={ val strNull="Not Available" /*Step 0 : cast all column to string*/ val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*) // df.show(5) /*Step 1 : fill na with digitNull to the given column*/ val naFill=df.na.fill(strNull,colNames) // naFill.show(5) /*Step 2: fill user defined null with digitNull*/ val result=replaceSpecValue(naFill,colNames,userDefinedNull,strNull) result } /** * This function takes a data frame, a list of column names, a old value, and a new value, it will replace the old * value by the new value in all given columns of the data frame. 
* * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param rawDf The source data frame. * @param colNames A list of column names * @param specValue A string value which needs to be replaced * @param newValue A string value which will repalce the old value * @return DataFrame * */ def replaceSpecValue(rawDf:DataFrame,colNames:Array[String],specValue:String,newValue:String):DataFrame={ /*Step 0 : cast all column to string*/ val spark=rawDf.sparkSession import spark.implicits._ val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*) /*Step 1 : transform spec value to null*/ var result=df for(colName<-colNames){ val newColName=colName+"_tmp" result=result.withColumn(newColName, when(result(colName) === specValue, newValue).otherwise(result(colName))) //create a tmp col with digitnull .drop(colName) //drop the old column .withColumnRenamed(newColName,colName) // rename the tmp to colName } result } /******************************************* 17.6 Merge column ******************************************/ def ExportMergedSofa(df:DataFrame):DataFrame={ val spark=df.sparkSession; import spark.implicits._ /***************************1. get Sofa v1 column *********************************/ val sofaD1=Array("Patient","CBD_Cardio_SOFA_Theoretical_D1","CBD_Coag_SOFA_Theoretical_D1", "CBD_Dobut_SOFA_Theoretical_D1","CBD_Hepat_SOFA_Theoretical_D1","CBD_Neuro_SOFA_Theoretical_D1","CBD_Renal_SOFA_Theoretical_D1", "CBD_Resp_SOFA_Theoretical_D1","CBD_SOFA_Theoretical_D1") val sofaD1Data=df.select(sofaD1.head,sofaD1.tail:_*).dropDuplicates().orderBy($"Patient".asc) /* Step0: to avoid null pointer exception, cast all columns to string and fill na with "null"*/ val sofaD1StrData = sofaD1Data.select(sofaD1Data.columns.map(c => col(c).cast(StringType)) : _*).na.fill(nullValue) /*Step1: normalize data for transmart format*/ // We drop the column STUDY_ID to avoid duplicate column after merge with the v2 column val sofaD1NormData=NormalizeColNameForTransmart(sofaD1StrData,sofaD1).drop("STUDY_ID") sofaD1NormData.show(3,false) /*Step2 : change col name*/ /*Step3 : check null col*/ //countNullValue(sofaD1NormData) /***************************2. get Sofa v2 column *********************************/ val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA", "CBD_Dobut_SOFA","CBD_Hepat_SOFA", "CBD_Neuro_SOFA","CBD_Renal_SOFA", "CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA") val utilityColumns=Array(patientIdColName,timePointColName) val allColumns=utilityColumns++sofaValueColumns /* Step0: pre-process data */ val sofaTPRawData=df.select(allColumns.head,allColumns.tail:_*).dropDuplicates().orderBy($"Patient".asc) /* We can conclude the refine process is correct, we have 981 null rows in D14, D28 and D60, before refine process, * we have 2452 rows, after we have 1471 rows */ val sofaTPRefinedData=removeRowsWithSpecValues(sofaTPRawData,"Time_Point",Array("D14","D28","D60")) sofaTPRefinedData.show(3,false) sofaTPRefinedData.cache() /*Step1: transform multi rows to columns*/ val sofaTPData=BuildColumnsWithTimePointS1(sofaTPRefinedData,sofaValueColumns,utilityColumns) /*Step2: normalize data for transmart format*/ val sofaTPNormData=NormalizeColNameForTransmart(sofaTPData,sofaTPData.columns.toArray) sofaTPNormData.show(3,false) /*Step3 : change col name*/ /*Step4 : check null col, all value columns has null values, so we need to do * fill null on all columns */ //countNullValue(sofaTPNormData) /***************************3. 
Merge the v1 and v2 column ************************/ /* We choose to merge V1 columns with V2 Day 01 columns, To do this we need to call function mergeSofaColumn which * can merge all elements in the sofaValueColumns, except "CBD_SOFA_NA", because it only exist for V2.*/ /*Step 1. join v1 and v2 dataframe */ val sofaFullDf = sofaTPNormData.join(sofaD1NormData, Seq("SUBJ_ID")) /*Step 2. merge v1 and v2 columns*/ val mergedSofaDf=mergeSofaColumns(sofaFullDf,sofaValueColumns) /***************************3. Normalize column order ************************/ /* To facilitate the usage, we sort the column with a given order*/ val sofaOrderedCol=Array( "STUDY_ID","SUBJ_ID", "CBD_Cardio_SOFA_D00","CBD_Cardio_SOFA_D01","CBD_Cardio_SOFA_D01-D02","CBD_Cardio_SOFA_D02","CBD_Cardio_SOFA_D03-D04","CBD_Cardio_SOFA_D05-D07", "CBD_Coag_SOFA_D00","CBD_Coag_SOFA_D01","CBD_Coag_SOFA_D01-D02","CBD_Coag_SOFA_D02","CBD_Coag_SOFA_D03-D04","CBD_Coag_SOFA_D05-D07", "CBD_Dobut_SOFA_D00","CBD_Dobut_SOFA_D01","CBD_Dobut_SOFA_D01-D02","CBD_Dobut_SOFA_D02","CBD_Dobut_SOFA_D03-D04","CBD_Dobut_SOFA_D05-D07", "CBD_Hepat_SOFA_D00","CBD_Hepat_SOFA_D01","CBD_Hepat_SOFA_D01-D02","CBD_Hepat_SOFA_D02","CBD_Hepat_SOFA_D03-D04","CBD_Hepat_SOFA_D05-D07", "CBD_Neuro_SOFA_D00","CBD_Neuro_SOFA_D01","CBD_Neuro_SOFA_D01-D02","CBD_Neuro_SOFA_D02","CBD_Neuro_SOFA_D03-D04","CBD_Neuro_SOFA_D05-D07", "CBD_Renal_SOFA_D00","CBD_Renal_SOFA_D01","CBD_Renal_SOFA_D01-D02","CBD_Renal_SOFA_D02","CBD_Renal_SOFA_D03-D04","CBD_Renal_SOFA_D05-D07", "CBD_Resp_SOFA_D00","CBD_Resp_SOFA_D01","CBD_Resp_SOFA_D01-D02","CBD_Resp_SOFA_D02","CBD_Resp_SOFA_D03-D04","CBD_Resp_SOFA_D05-D07", "CBD_SOFA_NA_D00","CBD_SOFA_NA_D01","CBD_SOFA_NA_D01-D02","CBD_SOFA_NA_D02","CBD_SOFA_NA_D03-D04","CBD_SOFA_NA_D05-D07", "CBD_SOFA_D00","CBD_SOFA_D01","CBD_SOFA_D01-D02","CBD_SOFA_D02","CBD_SOFA_D03-D04","CBD_SOFA_D05-D07") val sofaMergedAndOrderedDf=mergedSofaDf.select(sofaOrderedCol.head,sofaOrderedCol.tail:_*) sofaMergedAndOrderedDf.show(2,false) // write to disk, this version is for sanofi, all null value are "null" WriteDataToDisk(sofaMergedAndOrderedDf,"/tmp/Realism","SofaTP_data_sanofi") /************************4. 
replace null with transmart required null value ********/ /*/* string columns*/ val strColumns=Array("CBD_Dobut_SOFA_D00","CBD_Dobut_SOFA_D01","CBD_Dobut_SOFA_D01-D02","CBD_Dobut_SOFA_D02","CBD_Dobut_SOFA_D03-D04","CBD_Dobut_SOFA_D05-D07", "CBD_SOFA_NA_D00","CBD_SOFA_NA_D01","CBD_SOFA_NA_D01-D02","CBD_SOFA_NA_D02","CBD_SOFA_NA_D03-D04","CBD_SOFA_NA_D05-D07") val fillStr=fillTransmartNullForStrCol(mergedSofaDf,strColumns,nullValue) /* digit columns*/ val digitColumns=Array("CBD_Cardio_SOFA_D00","CBD_Cardio_SOFA_D01","CBD_Cardio_SOFA_D01-D02","CBD_Cardio_SOFA_D02","CBD_Cardio_SOFA_D03-D04","CBD_Cardio_SOFA_D05-D07", "CBD_Coag_SOFA_D00","CBD_Coag_SOFA_D01","CBD_Coag_SOFA_D01-D02","CBD_Coag_SOFA_D02","CBD_Coag_SOFA_D03-D04","CBD_Coag_SOFA_D05-D07", "CBD_Hepat_SOFA_D00","CBD_Hepat_SOFA_D01","CBD_Hepat_SOFA_D01-D02","CBD_Hepat_SOFA_D02","CBD_Hepat_SOFA_D03-D04","CBD_Hepat_SOFA_D05-D07", "CBD_Neuro_SOFA_D00","CBD_Neuro_SOFA_D01","CBD_Neuro_SOFA_D01-D02","CBD_Neuro_SOFA_D02","CBD_Neuro_SOFA_D03-D04","CBD_Neuro_SOFA_D05-D07", "CBD_Renal_SOFA_D00","CBD_Renal_SOFA_D01","CBD_Renal_SOFA_D01-D02","CBD_Renal_SOFA_D02","CBD_Renal_SOFA_D03-D04","CBD_Renal_SOFA_D05-D07", "CBD_Resp_SOFA_D00","CBD_Resp_SOFA_D01","CBD_Resp_SOFA_D01-D02","CBD_Resp_SOFA_D02","CBD_Resp_SOFA_D03-D04","CBD_Resp_SOFA_D05-D07", "CBD_SOFA_D00","CBD_SOFA_D01","CBD_SOFA_D01-D02","CBD_SOFA_D02","CBD_SOFA_D03-D04","CBD_SOFA_D05-D07") val fillDigit=fillTransmartNullForDigitCol(fillStr,digitColumns,nullValue) val finalSofaTPData=fillDigit /*Step5 : output data to disk*/ WriteDataToDisk(finalSofaTPData,"/tmp/Realism","SofaTP_data")*/ return sofaMergedAndOrderedDf } /** * This function merges values of two column, if one is null, return other, if two values are not null, check if they * are equal, otherwise raise exception, two column cant be merged. * @author <NAME> * @version 1.0 * @since 2019-02-13 * @param col1Value first column value to be merged * @param col2Value second column value to be merged * @return String * */ def mergeValue(col1Value:String,col2Value:String):String={ if (col1Value.equals("null")) {return col2Value} else if(col2Value.equals("null") || col1Value.equals(col2Value)) {return col1Value} else {return "error"} } // define a spark udf for mergeValue funciton val mergeValueUDF = udf(mergeValue(_:String, _: String)) /** * This function takes a dataframe and a list of sofa v2 column names. Based on the v2 column names, it can build the * corresponding v1 column names, then it calls the udf mergeValue to merge the v1 and v2 column. In the end it * removes the v2 day01 and v1 column, and add the merged column. 
* * @author <NAME> * @version 1.0 * @since 2019-02-27 * @param df the source data frame * @param colNameList the sofa v2 column names list * @return DataFrame * */ def mergeSofaColumns(df:DataFrame,colNameList:Array[String]):DataFrame={ var result=df for(colName<-colNameList){ // We exclude CBD_SOFA_NA, because it does not exist in V1, so no need to do the merge if (!colName.equals("CBD_SOFA_NA")){ /* CBD_Cardio_SOFA, generates CBD_Cardio_SOFA_D01 and CBD_Cardio_SOFA_Theoretical_D1 */ val col1Name=s"${colName}_Theoretical_D1" val col2Name=s"${colName}_D01" result=result.withColumn(s"merged_${col2Name}", mergeValueUDF(col(col1Name),col(col2Name))) //check the merge result result.select(s"merged_${col2Name}",col1Name,col2Name).show(10,false) //clean the result, drop v1 and V2 day01 columns, and rename merged_column to V2 day01 result=result.drop(col1Name) .drop(col2Name) .withColumnRenamed(s"merged_${col2Name}",col2Name) } } result } /******************************************** 17.7 Joining data *******************************************/ /******************************************** 17.8 Compare two columns *************************************/ /******************************************** 17.9 Other helping function ************************************/ /* This function add a new STUDY_ID column, rename the Patient column to subjID*/ def NormalizeColNameForTransmart(df:DataFrame,colNames:Array[String]):DataFrame={ val spark=df.sparkSession import spark.implicits._ /* step 0: cast all column to string*/ val dfStr=df.select(df.columns.map(c=>col(c).cast(StringType)):_*) /* step 1: fill na with user defined null*/ val dfNaFill=dfStr.na.fill(nullValue,dfStr.columns) /* step 2: add column study_id */ val dfWithStudyID=dfNaFill.withColumn("STUDY_ID",lit(studyID)) /* step 3: change col name Patient to SUBJ_ID*/ val dfWithSub=dfWithStudyID.withColumnRenamed("Patient",subjID) /* step 4: Re-order columns*/ val colNameWithOrder=Array("STUDY_ID",subjID)++colNames.filter(!_.equals(patientIdColName)) val result=dfWithSub.select(colNameWithOrder.head,colNameWithOrder.tail:_*) return result } /** * This function takes a data frame and a list of column names, it will print the distinct value of each given column * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @param colNames A list of column names * @return Unit * */ def getDistinctValueOfColumns(df:DataFrame,colNames:Array[String],showRange:Int):Unit={ for(colName<-colNames){ df.select(colName).distinct().show(showRange,false) } } /** * This function takes a data frame, a column name and an array of specific value. It will remove all rows if the * given column contains the specific value in the Array. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. * @param colName target column name * @param specValues an Array of specific values * @return DataFrame * */ def removeRowsWithSpecValues(df:DataFrame,colName:String,specValues:Array[String]):DataFrame={ var result=df for(specValue<-specValues){ result=result.filter(!(result(colName)===specValue)) } result } /** * This function takes a data frame and returns a map of (colNum->colName), the elements of the return map are * sorted by the column number with asc order. * * @author <NAME> * @version 1.0 * @since 2018-12-20 * @param df The source data frame. 
* @return a Map[Int, String] * */ def getColumnNumNameMapping(df:DataFrame):scala.collection.immutable.ListMap[Int,String]={ val columns=df.columns var i=1 var colNameNumMap=Map[Int,String]() for(col<-columns){ colNameNumMap+=(i->col) i=i+1 } /* To understand the following function, it's better to break it into two parts * 1. val x:Seq[(Int,String)] = colNameNumMap.toSeq.sortWith(_._1 < _._1) * * 2. ListMap(x:_*) * * The sortWith function returns a sequence of tuples, it takes a boolean expression, in * our example, _._1 means the first element of a tuple. We can also replace the sortWith * function with sortBy(_._1), which means sort the sequence by using the first element of the * tuples, from low to high. It also returns a sequence of tuples. * * The _* is used to convert the data so it will be passed as multiple parameters. In our example, * x has a Sequence type, but x:_* has tuple type * */ ListMap(colNameNumMap.toSeq.sortWith(_._1 < _._1):_*) } /* This function write the input dataframe to the output file system*/ def WriteDataToDisk(df:DataFrame,outputPath:String,fileName:String): Unit ={ df.coalesce(1).write.mode(SaveMode.Overwrite) .option("header","true") .option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating of crc files .option("encoding", "UTF-8") .option("delimiter", outputCsvDelimiter) // set tab as delimiter, required by tranSMART .csv(outputPath+"/"+fileName) } /* This study has five sub group of patients HV(Healthy Volunteer), Septic shock, Trauma, Surgery, Burn. This function * get info such as row number, all possible time point for each sub group*/ def GetStatsOfEachSubGroup(df:DataFrame,groupName:String):DataFrame={ val subGroup=df.filter(df("Subgroup")===groupName) val subGroupPatientRows=subGroup.groupBy("Patient").count().select("count").distinct().orderBy(asc("count")) println(s"**************************** All possible patient rows of sub group ${groupName}*******************") subGroupPatientRows.show() println(s"**************************** All possible time point of sub group ${groupName}*******************") val subGroupTimePoint=subGroup.select("TP_Class").distinct().orderBy(asc("TP_Class")) subGroupTimePoint.show(10) return subGroup } /** * This function build full colName for bioMarker, flow cytometry after row to column transformation * @author <NAME> * @version 1.0 * @since 2019-07-06 * @param CategoryName The category name of the bioMarker e.g. FLOW_CYTOMETRY. * @param colNames colNames represent all bioMarker values which will be transformed into column names in the result * @return Array[String] full list of the bioMarker resulting col name * */ def generateFullCols(CategoryName:String, colNames:Array[String]):Array[String]={ val tps=Array("D00","D01","D01-D02","D02","D03-D04","D05-D07","D14","D28","D60") val tails=Array("Value","Imputed_Value","Missing_Value_Type") var result:Array[String]=Array() for(colName<-colNames){ for(tp<-tps){ for(tail<-tails){ val fullColName=s"${CategoryName}_${colName}_${tp}/${tail}" result=result:+fullColName } } } return result } /*********************************************************************************************************** * ************************************** Annexe ******************************************* * ******************************************************************************************************/ /* * * the :_* syntax which means "treat this sequence as a sequence"! 
Otherwise, your sequence of n items will be
  * treated as a sequence of 1 item (which will be your sequence of n items).
  *
  * val seq = List(1, 2, 3)
  * funcWhichTakesSeq(seq)       //1: Array(List(1, 2, 3)) - i.e. a Seq with one entry
  * funcWhichTakesSeq(seq: _*)   //3: List(1, 2, 3)
  *
  * def funcWhichTakesSeq(seq: Any*) = println(seq.length + ": " + seq)
  * */
}
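// A minimal, self-contained sketch (illustrative only, not part of the original file) of the two
// idioms used throughout the functions above: the `: _*` varargs expansion and
// select(cols.head, cols.tail: _*). The column names and data below are hypothetical.
object VarargsSelectExample {
  import org.apache.spark.sql.SparkSession

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("VarargsSelectExample").getOrCreate()
    import spark.implicits._

    val df = Seq(("p1", 34, 1.75), ("p2", 29, 1.62)).toDF("Patient", "Age", "Height")

    // select() is declared as select(col: String, cols: String*), so a collection must be expanded with : _*
    val wanted = Array("Patient", "Height")
    df.select(wanted.head, wanted.tail: _*).show()

    // The same expansion works for any varargs method
    def takesVarargs(xs: Any*): Int = xs.length
    val seq = List(1, 2, 3)
    println(takesVarargs(seq))       // 1: the whole list is passed as a single argument
    println(takesVarargs(seq: _*))   // 3: the list is expanded into three arguments

    spark.stop()
  }
}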
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/ml/classification/DecisionTreeClassification.scala
package org.pengfei.spark.ml.classification import org.apache.log4j.{Level, Logger} import org.apache.spark.ml.Pipeline import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, DecisionTreeClassifier} import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorAssembler, VectorIndexer} import org.apache.spark.sql.SparkSession import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType} object DecisionTreeClassification { def main(args: Array[String]): Unit ={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) val spark = SparkSession.builder(). master("local"). appName("DecisionTreeClassification"). getOrCreate() // val data = spark.read.text("file:////home/pliu/Documents/spark_input/iris.txt").map(p=>Iris(Vectors.dense(p(0).toDouble,p(1).toDouble,p(2).toDouble, p(3).toDouble),p(4).toString())).toDF() val inputFile="file:///home/pliu/Documents/spark_input/iris.txt" val schema = StructType(Array( StructField("sepal_length",DoubleType,false), StructField("sepal_width",DoubleType,false), StructField("petal_length",DoubleType,false), StructField("petal_width",DoubleType,false), StructField("Label",StringType,false) )) //Read csv file val df = spark.read.format("com.databricks.spark.csv").option("header", "false").schema(schema).load(inputFile) //As the ml takes vector as features, create a new vector column which groups the four features val assembler = new VectorAssembler().setInputCols(Array("sepal_length","sepal_width","petal_length","petal_width")).setOutputCol("rawFeatures") val vecDf=assembler.transform(df) vecDf.show(5) //set label index in the dataframe val labelIndexer = new StringIndexer() .setInputCol("Label") .setOutputCol("indexedLabel") .fit(vecDf) // set the feature index in the dataframe val featureIndexer = new VectorIndexer() .setInputCol("rawFeatures") .setOutputCol("indexedFeatures") .setMaxCategories(4) .fit(vecDf) // split the data set for training data and test data val Array(trainingData, testData)=vecDf.randomSplit(Array(0.8,0.2)) // set the decision classifier val decisionTree = new DecisionTreeClassifier() .setLabelCol("indexedLabel") .setFeaturesCol("indexedFeatures") // transform the generated prediction to a humain readable predictLabel val labelConverter = new IndexToString() .setInputCol("prediction") .setOutputCol("predictedLabel") .setLabels(labelIndexer.labels) // build the machine learning pipeline val pipeline = new Pipeline().setStages(Array(labelIndexer,featureIndexer,decisionTree,labelConverter)) // train the model val model = pipeline.fit(trainingData) // test the model val predictions = model.transform(testData) // Select example rows to display. 
predictions.select("predictedLabel", "Label", "rawFeatures").show(5) val evaluator = new MulticlassClassificationEvaluator() .setLabelCol("indexedLabel") .setPredictionCol("prediction") .setMetricName("accuracy") val accuracy = evaluator.evaluate(predictions) println("Estimation accuracy :" + accuracy) println("Test Error = " + (1.0 - accuracy)) val treeModel = model.stages(2).asInstanceOf[DecisionTreeClassificationModel] println("Learned classification tree model:\n" + treeModel.toDebugString) //val total= df.count() //println("Total row is :"+total) //df.show(10) //val setosa= df.filter(df("Label")==="Iris-setosa").count() //println("Setosa number is :"+setosa) //df.createOrReplaceGlobalTempView("iris") //val label = spark.sql("select COUNT(Label) from global_temp.iris where Label = 'Iris-setosa'") //label.show() //val labelIndexer = new StringIndexer().setInputCol("Label") } }
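// A short sketch (illustrative, not part of the original lesson) showing how a fitted pipeline
// like the one trained above could be persisted and reloaded with Spark ML's built-in writers.
// The path below is hypothetical.
object DecisionTreePersistenceSketch {
  import org.apache.spark.ml.PipelineModel

  def saveAndReload(model: PipelineModel): PipelineModel = {
    val modelPath = "/tmp/models/iris_decision_tree" // hypothetical location
    // overwrite() avoids failures when the target directory already exists
    model.write.overwrite().save(modelPath)
    // The reloaded model can call transform() on new data exactly like the original one
    PipelineModel.load(modelPath)
  }
}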
pengfei99/Spark
WordCount/src/main/java/org/pengfei/spark/SparkHBaseIO.scala
package org.pengfei.spark import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hbase.HBaseConfiguration import org.apache.hadoop.hbase.client.{Put, Result} import org.apache.hadoop.hbase.io.ImmutableBytesWritable import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat} import org.apache.hadoop.hbase.util.Bytes import org.apache.hadoop.mapreduce.Job import org.apache.spark.{SparkConf, SparkContext} object SparkHBaseIO { def main(args:Array[String]): Unit ={ // create hbase configuration val hbaseConf = HBaseConfiguration.create() val sparkConf = new SparkConf().setAppName("SparkHBaseIO").setMaster("local") val sc = new SparkContext(sparkConf) insertRDDtoTable("student",sc) getTableAsRDD("student",sc,hbaseConf) } def getTableAsRDD(tableName:String,sparkContext: SparkContext,hbaseConf:Configuration): Unit ={ //set table name as student hbaseConf.set(TableInputFormat.INPUT_TABLE,tableName) val stuRDD = sparkContext.newAPIHadoopRDD(hbaseConf,classOf[TableInputFormat],classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable], classOf[org.apache.hadoop.hbase.client.Result]) val count = stuRDD.count() println("Studnets RDD count: "+ count) stuRDD.cache() stuRDD.foreach({ case (_,result) => val key = Bytes.toString(result.getRow) val name = Bytes.toString(result.getValue("info".getBytes,"name".getBytes)) val gender = Bytes.toString(result.getValue("info".getBytes,"gender".getBytes)) val age = Bytes.toString(result.getValue("info".getBytes,"age".getBytes)) println("Row key:"+key+" Name:"+name+" Gender:"+gender+" Age:"+age) }) } def insertRDDtoTable(tableName:String,sc:SparkContext): Unit ={ sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, tableName) val job = Job.getInstance(sc.hadoopConfiguration) job.setOutputKeyClass(classOf[ImmutableBytesWritable]) job.setOutputValueClass(classOf[Result]) job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]]) // create a rdd with two lines val indataRDD = sc.makeRDD(Array("3,Rongcheng,M,26","4,Guanhua,M,32")) val rdd = indataRDD.map(_.split(',')).map{arr=>{ // add row key val put = new Put(Bytes.toBytes(arr(0))) //add column name, gender, age to column family info put.addColumn(Bytes.toBytes("info"),Bytes.toBytes("name"),Bytes.toBytes(arr(1))) put.addColumn(Bytes.toBytes("info"),Bytes.toBytes("gender"),Bytes.toBytes(arr(2))) put.addColumn(Bytes.toBytes("info"),Bytes.toBytes("age"),Bytes.toBytes(arr(3))) (new ImmutableBytesWritable, put) }} rdd.saveAsNewAPIHadoopDataset(job.getConfiguration()) } }
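// An illustrative sketch (not part of the original object): converting the
// (ImmutableBytesWritable, Result) pairs read above into a plain case class, using the same
// "info" column family and qualifiers as getTableAsRDD. It assumes the same table layout.
object SparkHBaseIOSketch {
  case class Student(rowKey: String, name: String, gender: String, age: String)

  def toStudents(hbaseConf: Configuration, sc: SparkContext): org.apache.spark.rdd.RDD[Student] = {
    hbaseConf.set(TableInputFormat.INPUT_TABLE, "student")
    sc.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[ImmutableBytesWritable], classOf[Result])
      .map { case (_, result) =>
        Student(
          Bytes.toString(result.getRow),
          Bytes.toString(result.getValue("info".getBytes, "name".getBytes)),
          Bytes.toString(result.getValue("info".getBytes, "gender".getBytes)),
          Bytes.toString(result.getValue("info".getBytes, "age".getBytes)))
      }
  }
}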
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_Exc01_yelp.scala
<reponame>pengfei99/Spark package org.pengfei.Lesson04_Spark_SQL import com.typesafe.config.ConfigFactory import org.apache.log4j.{Level, Logger} import org.apache.spark.sql.SparkSession object Lesson04_Exc01_yelp { def main(args:Array[String])={ Logger.getLogger("org").setLevel(Level.OFF) Logger.getLogger("akka").setLevel(Level.OFF) /*In this exc01, I will use a yelp data set to illustrate how to do data analytics with spark*/ val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark") val path= sparkConfig.getString("sourceDataPath") val filePath=s"${path}/spark_lessons/Lesson04_Spark_SQL/yelp_academic_dataset_business.json" val spark=SparkSession.builder().master("local[2]").appName("Lesson4_Exc01_yelp").getOrCreate() val df=spark.read.option("inferSchema","true").json(filePath) df.show(1,false) //val df1=spark.read.format("json").option("inferSchema","true").load(filePath) //df1.show(1,false) /************************************************************************************************* * ***************************Step 1. Understand your dataset************************************ *************************************************************************************************/ /********************************1.1 Check dataset schema***************************************/ /* Here I ask spark to infer the schema from json file, so we need to check the schema, if you define the * schema yourself, you don't need to do this*/ df.printSchema() /*******************************1.2 get dataset size******************************************/ // get dataset row numbers val rowNum=df.count() // get column numbers val colNum=df.columns.length println(s"data set has ${rowNum} rows and ${colNum} columns") } }
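// A short illustrative continuation (not in the original exercise): two typical aggregations on
// the same dataframe. The column names "state" and "stars" are assumptions about the yelp
// business dataset; adjust them to the schema printed by df.printSchema() above.
object Lesson04_Exc01_yelp_sketch {
  import org.apache.spark.sql.DataFrame
  import org.apache.spark.sql.functions.{avg, count, desc}

  def topStatesByBusinessCount(df: DataFrame, n: Int = 10): Unit = {
    df.groupBy("state").agg(count("*").as("nb_business")).orderBy(desc("nb_business")).show(n)
  }

  def averageStarsPerState(df: DataFrame): Unit = {
    df.groupBy("state").agg(avg("stars").as("avg_stars")).orderBy(desc("avg_stars")).show(10)
  }
}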
pengfei99/Spark
LearningSpark/src/main/java/org/pengfei/Lesson21_Testing/Greeting.scala
package org.pengfei.Lesson21_Testing

object Greeting {

}
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/stats/DescriptiveStatistics.scala
<reponame>comister/kayenta<filename>kayenta-judge/src/main/scala/com/netflix/kayenta/judge/stats/DescriptiveStatistics.scala /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.stats import com.netflix.kayenta.judge.Metric import org.apache.commons.math.util.FastMath import org.apache.commons.math3.stat.StatUtils import org.apache.commons.math3.stat.descriptive.rank.Percentile import org.apache.commons.math3.stat.descriptive.rank.Percentile.EstimationType case class MetricStatistics(min: Double, max: Double, mean: Double, std: Double, count: Int){ def toMap: Map[String, Any] = { Map("min" -> min, "max" -> max, "mean" -> mean, "std" -> std, "count" -> count) } } object DescriptiveStatistics { def mean(metric: Metric): Double = { if (metric.values.isEmpty) 0.0 else StatUtils.mean(metric.values) } def median(metric: Metric): Double = { if (metric.values.isEmpty) 0.0 else StatUtils.percentile(metric.values, 50) } def min(metric: Metric): Double = { if (metric.values.isEmpty) 0.0 else StatUtils.min(metric.values) } def max(metric: Metric): Double = { if (metric.values.isEmpty) 0.0 else StatUtils.max(metric.values) } def std(metric: Metric): Double = { if (metric.values.isEmpty) 0.0 else FastMath.sqrt(StatUtils.variance(metric.values)) } /** * Returns an estimate of the pth percentile of the values in the metric object. * Uses the R-7 estimation strategy when the desired percentile lies between two data points. * @param metric input metric * @param p the percentile value to compute * @return the percentile value or Double.NaN if the metric is empty */ def percentile(metric: Metric, p: Double): Double ={ this.percentile(metric.values, p) } /** * Returns an estimate of the pth percentile of the values in the values array. * Uses the R-7 estimation strategy when the desired percentile lies between two data points. * @param values input array of values * @param p the percentile value to compute * @return the percentile value or Double.NaN if the array is empty */ def percentile(values: Array[Double], p: Double): Double ={ val percentile = new Percentile().withEstimationType(EstimationType.R_7) percentile.evaluate(values, p) } /** * Calculate a set of descriptive statistics for the input metric */ def summary(metric: Metric): MetricStatistics = { val mean = this.mean(metric) val min = this.min(metric) val max = this.max(metric) val std = this.std(metric) val count = metric.values.length MetricStatistics(min, max, mean, std, count) } }
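// An illustrative usage sketch (not part of the original file): computing summary statistics and
// a percentile for a sample Metric. The metric name, values, and label are hypothetical.
object DescriptiveStatisticsExample {
  def main(args: Array[String]): Unit = {
    val metric = Metric("request-latency", Array(12.0, 15.0, 11.0, 14.0, 90.0), "canary")

    // MetricStatistics(min, max, mean, std, count)
    val stats: MetricStatistics = DescriptiveStatistics.summary(metric)
    println(stats.toMap)

    // R-7 percentile estimate of the raw values
    println(DescriptiveStatistics.percentile(metric, 95))
  }
}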
comister/kayenta
kayenta-judge/src/test/scala/com/netflix/kayenta/judge/TestContextManagement.scala
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge import org.scalatest.{BeforeAndAfterAll, Suite} import org.springframework.core.annotation.{AnnotatedElementUtils, AnnotationAttributes} import org.springframework.test.annotation.DirtiesContext import org.springframework.test.context.support.DirtiesContextTestExecutionListener import org.springframework.test.context.{TestContext, TestContextManager} import org.springframework.util.Assert /** * Manages Spring test contexts via a TestContextManager. * * Implemented as a stackable trait that uses beforeAll() and afterAll() hooks to invoke initialization * and destruction logic, respectively. * Test contexts are marked dirty, and hence cleaned up, after all test methods have executed. * There is currently no support for indicating that a test method dirties a context. * * Sample usage: * {{{ * @ContextConfiguration(classes = Array(classOf[SomeConfiguration])) * class SomeTestSpec extends FlatSpec with TestContextManagement { * * // Use standard Autowired Spring annotation to inject necessary dependencies * // Note that Spring will inject val (read-only) fields * @Autowired * val someDependency: SomeClass = _ * * "Some test" should "verify something" in { * // Test implementation that uses injected dependency * } * * } * }}} * * @see org.springframework.test.context.TestContextManager * */ trait TestContextManagement extends BeforeAndAfterAll { this: Suite => private val testContextManager: TestContextManager = new TestContextManager(this.getClass) abstract override def beforeAll(): Unit = { super.beforeAll testContextManager.registerTestExecutionListeners(AlwaysDirtiesContextTestExecutionListener) testContextManager.beforeTestClass testContextManager.prepareTestInstance(this) } abstract override def afterAll(): Unit = { testContextManager.afterTestClass super.afterAll } } /** * Test execution listener that always dirties the context to ensure that contexts get cleaned after test execution. * * Note that this class dirties the context after all test methods have run. */ protected object AlwaysDirtiesContextTestExecutionListener extends DirtiesContextTestExecutionListener { @throws(classOf[Exception]) override def afterTestClass(testContext: TestContext) { val testClass: Class[_] = testContext.getTestClass Assert.notNull(testClass, "The test class of the supplied TestContext must not be null") val annotationType: String = classOf[DirtiesContext].getName val annAttrs: AnnotationAttributes = AnnotatedElementUtils.getAnnotationAttributes(testClass, annotationType) val hierarchyMode: DirtiesContext.HierarchyMode = if ((annAttrs == null)) null else annAttrs.getEnum[DirtiesContext.HierarchyMode]("hierarchyMode") dirtyContext(testContext, hierarchyMode) } }
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/detectors/KSigmaDetector.scala
<filename>kayenta-judge/src/main/scala/com/netflix/kayenta/judge/detectors/KSigmaDetector.scala /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.detectors import org.apache.commons.math3.stat.StatUtils /** * KSigma Detector * * Values which are greater than or less than k standard deviations from the mean are considered outliers * Reference: https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule */ class KSigmaDetector(k: Double = 3.0) extends BaseOutlierDetector{ require(k > 0.0, "k must be greater than zero") override def detect(data: Array[Double]): Array[Boolean] = { //Calculate the mean and standard deviation of the input data val mean = StatUtils.mean(data) val variance = StatUtils.populationVariance(data) val stdDeviation = scala.math.sqrt(variance) //Values that fall outside of k standard deviations from the mean are considered outliers data.map(x => if (scala.math.abs(x - mean) > (stdDeviation * k)) true else false) } }
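// An illustrative usage sketch (not part of the original file): flagging values that sit more
// than k standard deviations away from the mean. The sample data is made up and mirrors the
// KSigma test case elsewhere in this repository, where only 20.0 is flagged.
object KSigmaDetectorExample {
  def main(args: Array[String]): Unit = {
    val data = Array(1.0, 1.0, 1.0, 1.0, 1.0, 20.0, 1.0, 1.0, 1.0, 1.0, 1.0)
    val detector = new KSigmaDetector(k = 3.0)
    val isOutlier: Array[Boolean] = detector.detect(data)
    // Pair each value with its outlier flag
    data.zip(isOutlier).foreach { case (value, flag) => println(s"$value -> $flag") }
  }
}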
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/utils/RandomUtils.scala
package com.netflix.kayenta.judge.utils import scala.util.Random object RandomUtils { private var random = new Random() /** * Initialize Random with the desired seed */ def init(seed: Int): Unit = { random = new Random(seed) } /** * Draw random samples from a normal (Gaussian) distribution. * @param mean Mean (“centre”) of the distribution. * @param stdev Standard deviation (spread or “width”) of the distribution. * @param numSamples Number of samples to draw */ def normal(mean: Double, stdev: Double, numSamples: Int): Array[Double] ={ List.fill(numSamples)(random.nextGaussian() * stdev + mean).toArray } }
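// An illustrative usage sketch (not part of the original object): seeding the generator for
// reproducibility and drawing samples from a normal distribution.
object RandomUtilsExample {
  def main(args: Array[String]): Unit = {
    RandomUtils.init(seed = 42) // fixed seed so repeated runs draw the same values
    val samples = RandomUtils.normal(mean = 10.0, stdev = 2.0, numSamples = 1000)
    val empiricalMean = samples.sum / samples.length
    println(s"drew ${samples.length} samples, empirical mean ~ $empiricalMean")
  }
}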
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/utils/MapUtils.scala
<reponame>comister/kayenta /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.utils import scala.collection.JavaConverters._ object MapUtils { def get(data: Any, path: String*): Option[Any] = { if (path.isEmpty) Some(data) else { data match { case jm: java.util.Map[_, _] => val map = jm.asScala.toMap.asInstanceOf[Map[String, Any]] map.get(path.head).flatMap(v => get(v, path.tail: _*)) case m: Map[_, _] => val map = m.asInstanceOf[Map[String, Any]] map.get(path.head).flatMap(v => get(v, path.tail: _*)) case jl: java.util.List[_] => val result = jl.asScala.toSeq.flatMap(v => get(v, path: _*)) if (result.isEmpty) None else Some(result) case vs: Seq[_] => val result = vs.flatMap(v => get(v, path: _*)) if (result.isEmpty) None else Some(result) case _ => None } } } def getAsStringWithDefault(default: String, data: Any, path: String*): String = { get(data, path: _*).getOrElse(default).toString } def getAsBooleanWithDefault(default: Boolean, data: Any, path: String*): Boolean = { get(data, path: _*).getOrElse(default).asInstanceOf[Boolean] } def getAsDoubleWithDefault(default: Double, data: Any, path: String*): Double = { val item = get(data, path: _*).getOrElse(default) item match { case integer: Integer => integer.toDouble case _ => item.asInstanceOf[Double] } } }
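// An illustrative usage sketch (not part of the original object): reading values out of a nested
// structure with a path, with and without defaults. The keys and values below are made up.
object MapUtilsExample {
  def main(args: Array[String]): Unit = {
    val config: Map[String, Any] = Map(
      "judge" -> Map("name" -> "example-judge", "enabled" -> true),
      "scores" -> Seq(Map("value" -> 85.0), Map("value" -> 90.0)))

    println(MapUtils.get(config, "judge", "name"))                                // Some(example-judge)
    println(MapUtils.get(config, "judge", "missing"))                             // None
    println(MapUtils.get(config, "scores", "value"))                              // Some(List(85.0, 90.0))
    println(MapUtils.getAsBooleanWithDefault(false, config, "judge", "enabled"))  // true
    println(MapUtils.getAsDoubleWithDefault(0.0, config, "judge", "threshold"))   // 0.0 (default)
  }
}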
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/detectors/IQRDetector.scala
<reponame>comister/kayenta /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.detectors import com.netflix.kayenta.judge.stats.DescriptiveStatistics.percentile /** * Interquartile Range Detector * * Values which fall below Q1-factor*(IQR) or above Q3+factor(IQR) are considered outliers. * The IQR is a measure of statistical dispersion, being equal to the difference between * the upper and lower quartiles. * * Reference: https://en.wikipedia.org/wiki/Outlier#Tukey.27s_test * Note: To reduce sensitivity, take the max of the IQR or the 99th percentile */ class IQRDetector(factor: Double = 1.5, reduceSensitivity: Boolean = false) extends BaseOutlierDetector { require(factor > 0.0, "factor must be greater than zero") /** * Calculate the Interquartile Range (IQR) * @param data * @return */ private def calculateIQR(data: Array[Double]): (Double, Double) = { //Calculate the 25th and 75th percentiles val p75 = percentile(data, 75) val p25 = percentile(data, 25) //Calculate the Interquartile Range (IQR) val iqr = p75-p25 //Calculate the upper and lower fences val lowerIQR = p25 - (factor * iqr) val upperIQR = p75 + (factor * iqr) (lowerIQR, upperIQR) } override def detect(data: Array[Double]): Array[Boolean] ={ val (lowerFence, upperFence) = if(reduceSensitivity){ //Calculate the Interquartile Range (IQR) val (lowerIQR, upperIQR) = calculateIQR(data) //Calculate the 1st and 99th percentiles val p01 = percentile(data, 1) val p99 = percentile(data, 99) //Calculate the upper and lower fences val lowerFence = math.min(p01, lowerIQR) val upperFence = math.max(p99, upperIQR) (lowerFence, upperFence) } else { calculateIQR(data) } data.map(x => if (x > upperFence || x < lowerFence) true else false) } }
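// An illustrative usage sketch (not part of the original file): flagging points outside the Tukey
// fences, with and without the reduced-sensitivity option. The sample data is made up and mirrors
// the IQR test case elsewhere in this repository, where only 50.0 is flagged.
object IQRDetectorExample {
  def main(args: Array[String]): Unit = {
    val data = Array(21.0, 23.0, 24.0, 25.0, 50.0, 29.0, 23.0, 21.0)

    val standard = new IQRDetector(factor = 1.5)
    println(standard.detect(data).mkString(", "))

    val lessSensitive = new IQRDetector(factor = 3.0, reduceSensitivity = true)
    println(lessSensitive.detect(data).mkString(", "))
  }
}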
comister/kayenta
kayenta-judge/src/test/scala/com/netflix/kayenta/judge/StatisticSuite.scala
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge import com.netflix.kayenta.judge.stats.DescriptiveStatistics.percentile import com.netflix.kayenta.judge.stats.{DescriptiveStatistics, EffectSizes, MetricStatistics} import org.scalatest.FunSuite import org.scalatest.Matchers._ class StatisticSuite extends FunSuite{ test("Summary Statistics: Scalar"){ val metric = Metric("scalar", Array[Double](4.0), "test") val result = DescriptiveStatistics.summary(metric) val truth = MetricStatistics(4, 4, 4, 0, 1) assert(result === truth) } test("Summary Statistics: List"){ val metric = Metric("list", Array[Double](1.0, 1.0, 1.0, 10.0, 10.0, 10.0), "test") val result = DescriptiveStatistics.summary(metric) assert(result.min === 1.0) assert(result.max === 10.0) assert(result.mean === 5.5) assert(result.std === (4.9295 +- 1e-4)) assert(result.count === 6.0) } test("Summary Statistics: No Data"){ val metric = Metric("testNoData", Array[Double](), "test") val result = DescriptiveStatistics.summary(metric) val truth = MetricStatistics(0, 0, 0, 0, 0) assert(result === truth) } test("Summary Statistics: Basic Percentile"){ val testData = Array(0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5) assert(percentile(testData, 5) === (0.175 +- 1.0e-4)) assert(percentile(testData, 50) === 1.75) assert(percentile(testData, 100) === 3.5) } test("Summary Statistics: Basic Percentile Estimate (Linear Interpolation)") { val testData = Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0) assert(percentile(testData, 50) === 4.5) } test("Summary Statistics: Percentile (NIST Data)"){ val testData = Array( 95.1772, 95.1567, 95.1937, 95.1959, 95.1442, 95.0610, 95.1591, 95.1195, 95.1772, 95.0925, 95.1990, 95.1682 ) assert(percentile(testData, 90) === 95.19568) } test("Summary Statistics: Percentile Metric Object"){ val metric = Metric("test", Array[Double](1.0), "test") assert(percentile(metric, 100) === 1.0) } test("Summary Statistics: Percentile Estimate (Linear Interpolation)"){ val testData = Array( 0.07142857142857144, 0.02083333333333332, 0.16666666666666666, 0.03448275862068966, 0.038461538461538464, 0.03225806451612904, 0.027777777777777773, 0.0, 0.23076923076923078, 0.10344827586206898, 0.04545454545454542, 0.0, 0.028571428571428564, 0.0, 0.0, 0.04, 0.0, 0.0, 0.05128205128205127, 0.10714285714285716, 0.0263157894736842, 0.04166666666666667, 0.09523809523809522, 0.02941176470588235, 0.024999999999999984, 0.0, 0.0, 0.023809523809523794, 0.0, 0.02564102564102563, 0.0, 0.0, 0.028571428571428564, 0.07142857142857144, 0.047619047619047596, 0.021276595744680833, 0.02564102564102563, 0.03125, 0.03125, 0.03125, 0.11363636363636356, 0.03571428571428572, 0.0, 0.02777777777777777, 0.0, 0.0, 0.055555555555555546, 0.028571428571428564, 0.03225806451612904 ) assert(percentile(testData, 25) === 0.0) assert(percentile(testData, 75) === (0.0416 +- 1.0e-4)) } test("Effect Size: Mean Ratio (No Effect)"){ val experimentData = Array(1.0, 1.0, 1.0, 1.0, 1.0) val controlData = Array(1.0, 1.0, 1.0, 
1.0, 1.0) val experimentMetric = Metric("test-metric", experimentData, "canary") val controlMetric = Metric("test-metric", controlData, "baseline") val result = EffectSizes.meanRatio(controlMetric, experimentMetric) assert(result === 1.0) } test("Effect Size: Mean Ratio (Summary Stats)"){ val experimentData = Array(1.0, 1.0, 1.0, 1.0, 1.0) val controlData = Array(1.0, 1.0, 1.0, 1.0, 1.0) val experimentMetric = Metric("test-metric", experimentData, "canary") val controlMetric = Metric("test-metric", controlData, "baseline") val experimentStats = DescriptiveStatistics.summary(experimentMetric) val controlStats = DescriptiveStatistics.summary(controlMetric) val result = EffectSizes.meanRatio(controlMetric, experimentMetric) assert(result === 1.0) } test("Effect Size: Mean Ratio (High)"){ val experimentData = Array(10.0, 10.0, 10.0, 10.0, 10.0) val controlData = Array(1.0, 1.0, 1.0, 1.0, 1.0) val experimentMetric = Metric("high-metric", experimentData, "canary") val controlMetric = Metric("high-metric", controlData, "baseline") val result = EffectSizes.meanRatio(controlMetric, experimentMetric) assert(result === 10.0) } test("Effect Size: Mean Ratio (Low)"){ val experimentData = Array(10.0, 10.0, 10.0, 10.0, 10.0) val controlData = Array(100.0, 100.0, 100.0, 100.0, 100.0) val experimentMetric = Metric("high-metric", experimentData, "canary") val controlMetric = Metric("high-metric", controlData, "baseline") val result = EffectSizes.meanRatio(controlMetric, experimentMetric) assert(result === 0.1) } test("Effect Size: Mean Ratio (Zero Mean)"){ val experimentData = Array(10.0, 10.0, 10.0, 10.0, 10.0) val controlData = Array(0.0, 0.0, 0.0, 0.0, 0.0) val experimentMetric = Metric("high-metric", experimentData, "canary") val controlMetric = Metric("high-metric", controlData, "baseline") assertThrows[IllegalArgumentException]{ EffectSizes.meanRatio(controlMetric, experimentMetric) } } test("Effect Size: Cohen's D"){ val experimentData = Array(5.0, 5.0, 5.0, 10.0, 10.0, 10.0) val controlData = Array(1.0, 1.0, 1.0, 2.0, 2.0, 2.0) val experimentMetric = Metric("high-metric", experimentData, "canary") val controlMetric = Metric("high-metric", controlData, "baseline") val result = EffectSizes.cohenD(experimentMetric, controlMetric) assert(result === (3.03821 +- 1e-5)) } }
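// An illustrative additional test sketch (not part of the original suite): exercising the median
// and std helpers on small, hand-computed inputs.
class StatisticSuiteExtraSketch extends FunSuite {

  test("Summary Statistics: Median (odd number of samples)") {
    val metric = Metric("median-odd", Array(5.0, 1.0, 3.0, 2.0, 4.0), "test")
    assert(DescriptiveStatistics.median(metric) === 3.0)
  }

  test("Summary Statistics: Std of constant data is zero") {
    val metric = Metric("constant", Array(7.0, 7.0, 7.0, 7.0), "test")
    assert(DescriptiveStatistics.std(metric) === 0.0)
  }
}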
comister/kayenta
kayenta-judge/src/test/scala/com/netflix/kayenta/judge/TransformSuite.scala
<filename>kayenta-judge/src/test/scala/com/netflix/kayenta/judge/TransformSuite.scala /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge import com.netflix.kayenta.judge.preprocessing.Transforms.{removeNaNs, removeOutliers, replaceNaNs} import com.netflix.kayenta.judge.detectors.{IQRDetector, KSigmaDetector} import com.netflix.kayenta.judge.preprocessing.Transforms import com.netflix.kayenta.judge.utils.RandomUtils import org.apache.commons.math3.stat.StatUtils import org.scalatest.FunSuite import org.scalatest.Matchers._ class TransformSuite extends FunSuite { test("Remove Single NaN"){ val testData = Array(0.0, 1.0, Double.NaN, 1.0, 0.0) val truth = Array(0.0, 1.0, 1.0, 0.0) val result = Transforms.removeNaNs(testData) assert(result === truth) } test("Remove Multiple NaN") { val testData = Array(Double.NaN, Double.NaN, Double.NaN) val truth = Array[Double]() val result = removeNaNs(testData) assert(result === truth) } test("Replace NANs") { val testData = Array(Double.NaN, 10.10, 20.20, 30.30, Double.NaN, 40.40) val truth = Array(0.0, 10.10, 20.20, 30.30, 0.0, 40.40) val result = replaceNaNs(testData, 0.0) assert(result === truth) } test("IQR Outlier Removal") { val testData = Array(21.0, 23.0, 24.0, 25.0, 50.0, 29.0, 23.0, 21.0) val truth = Array(21.0, 23.0, 24.0, 25.0, 29.0, 23.0, 21.0) val detector = new IQRDetector(factor = 1.5) val result = removeOutliers(testData, detector) assert(result === truth) } test("KSigma Outlier Removal") { val testData = Array(1.0, 1.0, 1.0, 1.0, 1.0, 20.0, 1.0, 1.0, 1.0, 1.0, 1.0) val truth = Array(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0) val detector = new KSigmaDetector(k = 3.0) val result = removeOutliers(testData, detector) assert(result === truth) } test("Add Gaussian Noise"){ val seed = 12345 RandomUtils.init(seed) val testData = Array(0.0, 0.0, 0.0, 0.0, 0.0) val transformed = Transforms.addGaussianNoise(testData, mean = 1.0, stdev = 1.0) val transformedMean = StatUtils.mean(transformed) val transformedStdev = math.sqrt(StatUtils.variance(transformed)) assert(transformedMean === (1.0 +- 0.2)) assert(transformedStdev === (1.0 +- 0.2)) assert(transformed.length === testData.length) } }
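// An illustrative additional test sketch (not part of the original suite): chaining the
// NaN-removal and outlier-removal helpers as a simple preprocessing pipeline. The expected
// output matches the IQR removal case above once the NaNs are stripped.
class TransformSuiteExtraSketch extends FunSuite {

  test("Remove NaNs then IQR outliers") {
    val testData = Array(Double.NaN, 21.0, 23.0, 24.0, 25.0, 50.0, 29.0, 23.0, 21.0, Double.NaN)
    val cleaned = removeNaNs(testData)
    val detector = new IQRDetector(factor = 1.5)
    val result = removeOutliers(cleaned, detector)
    assert(result === Array(21.0, 23.0, 24.0, 25.0, 29.0, 23.0, 21.0))
  }
}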
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/preprocessing/Validators.scala
<filename>kayenta-judge/src/main/scala/com/netflix/kayenta/judge/preprocessing/Validators.scala /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.preprocessing import com.netflix.kayenta.judge.Metric case class ValidationResult(valid: Boolean, reason: Option[String]=None) object Validators { /** * Validate if the input data array is empty * @param metric * @return */ def checkEmptyArray(metric: Metric): ValidationResult = { if(metric.values.isEmpty){ val reason = s"Empty data array for ${metric.label}" ValidationResult(valid=false, reason=Some(reason)) }else{ ValidationResult(valid=true) } } /** * Validate if the input data array is all NaN values * @param metric * @return */ def checkNaNArray(metric: Metric): ValidationResult = { if(metric.values.forall(_.isNaN)){ val reason = s"No data for ${metric.label}" ValidationResult(valid=false, reason=Some(reason)) }else{ ValidationResult(valid=true) } } }
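// An illustrative usage sketch (not part of the original object): validating metrics before
// classification. The metric names and labels below are made up.
object ValidatorsExample {
  def main(args: Array[String]): Unit = {
    val empty = Metric("cpu-usage", Array[Double](), "canary")
    val allNaN = Metric("cpu-usage", Array(Double.NaN, Double.NaN), "baseline")
    val healthy = Metric("cpu-usage", Array(0.4, 0.5, 0.6), "canary")

    println(Validators.checkEmptyArray(empty))    // ValidationResult(false, Some(Empty data array for canary))
    println(Validators.checkNaNArray(allNaN))     // ValidationResult(false, Some(No data for baseline))
    println(Validators.checkEmptyArray(healthy))  // ValidationResult(true, None)
  }
}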
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/classifiers/metric/MannWhitneyClassifier.scala
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.classifiers.metric import com.netflix.kayenta.judge.Metric import com.netflix.kayenta.judge.preprocessing.Transforms import com.netflix.kayenta.judge.stats.EffectSizes import com.netflix.kayenta.mannwhitney.{MannWhitney, MannWhitneyParams} import org.apache.commons.math3.stat.StatUtils case class MannWhitneyResult(lowerConfidence: Double, upperConfidence: Double, estimate: Double, deviation: Double) case class ComparisonResult(classification: MetricClassificationLabel, reason: Option[String], deviation: Double) class MannWhitneyClassifier(tolerance: Double=0.25, confLevel: Double=0.95, effectSizeThresholds: (Double, Double) = (1.0, 1.0), criticalThresholds: (Double, Double) = (1.0, 1.0)) extends BaseMetricClassifier { /** * Mann-Whitney U Test * An implementation of the Mann-Whitney U test (also called the Wilcoxon rank-sum test). * Note: In the case of the degenerate distribution, Gaussian noise is added */ def MannWhitneyUTest(experimentValues: Array[Double], controlValues: Array[Double]): MannWhitneyResult = { val mwTest = new MannWhitney() //Check for tied ranks and transform the data by adding Gaussian noise val addNoise = if (experimentValues.distinct.length == 1 && controlValues.distinct.length == 1) true else false val experiment = if(addNoise) addGaussianNoise(experimentValues) else experimentValues val control = if(addNoise) addGaussianNoise(controlValues) else controlValues val params = MannWhitneyParams(mu = 0, confLevel, control, experiment) val testResult = mwTest.eval(params) val confInterval = testResult.confidenceInterval val estimate = testResult.estimate //Calculate the deviation (Effect Size) between the experiment and control val effectSize = calculateDeviation(experiment, control) MannWhitneyResult(confInterval(0), confInterval(1), estimate, effectSize) } /** * Add Gaussian noise to the input array * Scale the amplitude of the noise based on the input values * Note: the input array should not contain NaN values */ private def addGaussianNoise(values: Array[Double]): Array[Double] = { val scalingFactor = 1e-5 val metricScale = values.distinct.head * scalingFactor Transforms.addGaussianNoise(values, mean=0.0, stdev = metricScale) } /** * Calculate the upper and lower bounds for classifying the metric. 
* The bounds are calculated as a fraction of the Hodges–Lehmann estimator */ private def calculateBounds(testResult: MannWhitneyResult): (Double, Double) = { val estimate = math.abs(testResult.estimate) val criticalValue = tolerance * estimate val lowerBound = -1 * criticalValue val upperBound = criticalValue (lowerBound, upperBound) } /** * Calculate the deviation (Effect Size) between the experiment and control */ private def calculateDeviation(experiment: Array[Double], control: Array[Double]): Double = { if(StatUtils.mean(control) == 0.0) 1.0 else EffectSizes.meanRatio(control, experiment) } /** * Compare the experiment to the control using the Mann-Whitney U Test and check the magnitude of the effect */ private def compare(control: Metric, experiment: Metric, direction: MetricDirection, effectSizeThresholds: (Double, Double)): ComparisonResult = { //Perform the Mann-Whitney U Test val mwResult = MannWhitneyUTest(experiment.values, control.values) val (lowerBound, upperBound) = calculateBounds(mwResult) //Check if the experiment is high in comparison to the control val isHigh = { (direction == MetricDirection.Increase || direction == MetricDirection.Either) && mwResult.lowerConfidence > upperBound && mwResult.deviation >= effectSizeThresholds._2 } //Check if the experiment is low in comparison to the control val isLow = { (direction == MetricDirection.Decrease || direction == MetricDirection.Either) && mwResult.upperConfidence < lowerBound && mwResult.deviation <= effectSizeThresholds._1 } if(isHigh){ val reason = s"${experiment.name} was classified as $High" ComparisonResult(High, Some(reason), mwResult.deviation) }else if(isLow){ val reason = s"${experiment.name} was classified as $Low" ComparisonResult(Low, Some(reason), mwResult.deviation) } else { ComparisonResult(Pass, None, mwResult.deviation) } } override def classify(control: Metric, experiment: Metric, direction: MetricDirection, nanStrategy: NaNStrategy, isCriticalMetric: Boolean, isDataRequired: Boolean): MetricClassification = { //Check if there is no-data for the experiment or control if (experiment.values.isEmpty || control.values.isEmpty) { if (nanStrategy == NaNStrategy.Remove) { val reason = s"Missing data for ${experiment.name}" //Check if the config indicates that the given metric should have data but not critically fail the canary if (isDataRequired && !isCriticalMetric) { return MetricClassification(NodataFailMetric, Some(reason), 1.0, critical = false) } return MetricClassification(Nodata, Some(reason), 1.0, isCriticalMetric) } else { return MetricClassification(Pass, None, 1.0, critical = false) } } //Check if the experiment and control data are equal if (experiment.values.sorted.sameElements(control.values.sorted)) { val reason = s"The ${experiment.label} and ${control.label} data are identical" return MetricClassification(Pass, Some(reason), 1.0, critical = false) } //Check the number of unique observations if (experiment.values.union(control.values).distinct.length == 1) { return MetricClassification(Pass, None, 1.0, critical = false) } //Compare the experiment to the control using the Mann-Whitney U Test, checking the magnitude of the effect val comparison = compare(control, experiment, direction, effectSizeThresholds) //Check if the metric was marked as critical, and if the metric was classified as a failure (High, Low) if(isCriticalMetric && comparison.classification == High && comparison.deviation >= criticalThresholds._2){ val reason = s"The metric ${experiment.name} was classified as $High (Critical)" 
MetricClassification(High, Some(reason), comparison.deviation, critical = true) }else if(isCriticalMetric && comparison.classification == Low && comparison.deviation <= criticalThresholds._1){ val reason = s"The metric ${experiment.name} was classified as $Low (Critical)" MetricClassification(Low, Some(reason), comparison.deviation, critical = true) }else if(isCriticalMetric && (comparison.classification == Nodata || comparison.classification == Error)){ MetricClassification(comparison.classification, comparison.reason, comparison.deviation, critical = true) }else{ MetricClassification(comparison.classification, comparison.reason, comparison.deviation, critical = false) } } }
comister/kayenta
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/evaluation/BinaryClassificationEvaluator.scala
<filename>kayenta-judge/src/main/scala/com/netflix/kayenta/judge/evaluation/BinaryClassificationEvaluator.scala /* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.kayenta.judge.evaluation import com.netflix.kayenta.judge.classifiers.metric._ /** * Evaluator for binary classification */ class BinaryClassificationEvaluator extends BaseEvaluator{ private def validInput(input: Array[Int]): Boolean ={ input.contains(0) || input.contains(1) } /** * Calculate evaluation metrics * @param truth ground truth (correct) labels * @param predictions predicted labels, as returned by a classifier * @return map of evaluation results (precision, recall, f1, accuracy) */ def calculateMetrics(truth: Array[Int], predictions: Array[Int]): Map[String, Double] ={ require(predictions.length == truth.length, "the prediction vector and truth vector must be the same size") require(validInput(predictions) && validInput(truth), "the prediction or truth vectors contain invalid entries") //Calculate the evaluation metrics val precision = Metrics.precision(truth, predictions) val recall = Metrics.recall(truth, predictions) val f1 = Metrics.fMeasure(truth, predictions) val accuracy = Metrics.accuracy(truth, predictions) //Return a default value of -1.0 Map(("Precision", precision), ("Recall", recall), ("FMeasure", f1), ("Accuracy", accuracy)).withDefaultValue(-1.0) } /** * Convert the classification labels to a binary representation * @param label metric classification result (label) * @return binary representation */ def convertLabel(label: MetricClassificationLabel): Int ={ label match { case High => 1 case Low => 1 case NodataFailMetric => 1 case Nodata => 0 case Pass => 0 case Error => -1 } } /** * Evaluate a metric classification algorithm (binary) * @param classifier metric classification algorithm * @param dataset input dataset to evaluate * @return map of evaluation results (precision, recall, f1, accuracy) */ override def evaluate[T <: BaseMetricClassifier](classifier: T, dataset: List[LabeledInstance]): Map[String, Double] = { val truth = dataset.map(x => convertLabel(x.label)) val predictions = dataset.map(x => classifier.classify(x.control, x.experiment).classification) val binaryPredictions = predictions.map(convertLabel) calculateMetrics(truth.toArray, binaryPredictions.toArray) } }
renhaocui/ensembleTopic
TMT/test2.scala
<reponame>renhaocui/ensembleTopic
import scalanlp.io._;
import scalanlp.stage._;
import scalanlp.stage.text._;
import scalanlp.text.tokenize._;
import scalanlp.pipes.Pipes.global._;

import edu.stanford.nlp.tmt.stage._;
import edu.stanford.nlp.tmt.model.lda._;
import edu.stanford.nlp.tmt.model.llda._;

val modelPath = file("TMT Snapshots");

println("Loading "+modelPath);
val model = LoadCVB0LDA(modelPath);

val source = CSVFile("TMT\\LDAFormatTrain.csv");

val text = {
  source ~>                          // read from the source file
  Column(2) ~>                       // select column containing text
  TokenizeWith(model.tokenizer.get)  // tokenize with existing model's tokenizer
}

// Base name of output files to generate
val output = file(modelPath, source.meta[java.io.File].getName.replaceAll(".csv",""));

// turn the text into a dataset ready to be used with LDA
val dataset = LDADataset(text, termIndex = model.termIndex);

println("Writing document distributions to "+output+"-document-topic-distributions.csv");
val perDocTopicDistributions = InferCVB0DocumentTopicDistributions(model, dataset);
// note: the output file name is spelled consistently with the log line above
CSVFile(output+"-document-topic-distributions.csv").write(perDocTopicDistributions);

/*
println("Writing topic usage to "+output+"-usage.csv");
val usage = QueryTopicUsage(model, dataset, perDocTopicDistributions);
CSVFile(output+"-usage.csv").write(usage);
*/

println("Estimating per-doc per-word topic distributions");
val perDocWordTopicDistributions = EstimatePerWordTopicDistributions(
  model, dataset, perDocTopicDistributions);

println("Writing top terms to "+output+"-top-terms.csv");
val topTerms = QueryTopTerms(model, dataset, perDocWordTopicDistributions, numTopTerms=50);
CSVFile(output+"-top-terms.csv").write(topTerms);
renhaocui/ensembleTopic
TMT/train.scala
<gh_stars>1-10
import scalanlp.io._;
import scalanlp.stage._;
import scalanlp.stage.text._;
import scalanlp.text.tokenize._;
import scalanlp.pipes.Pipes.global._;

import edu.stanford.nlp.tmt.stage._;
import edu.stanford.nlp.tmt.model.lda._;
import edu.stanford.nlp.tmt.model.llda._;

val source = CSVFile("TMT\\LDAFormatTrain.csv");

val tokenizer = {
  SimpleEnglishTokenizer() ~>     // tokenize on space and punctuation
  CaseFolder() ~>                 // lowercase everything
  WordsAndNumbersOnlyFilter() ~>  // ignore non-words and non-numbers
  MinimumLengthFilter(1)          // take terms with >=1 characters
}

val text = {
  source ~>                       // read from the source file
  Column(2) ~>                    // select column containing text
  TokenizeWith(tokenizer) ~>      // tokenize with tokenizer above
  TermCounter() ~>                // collect counts (needed below)
  DocumentMinimumLengthFilter(0)  // take only docs with >=1 terms
}

// define fields from the dataset we are going to slice against
val labels = {
  source ~>                               // read from the source file
  Column(1) ~>                            // take column two, the year
  TokenizeWith(WhitespaceTokenizer()) ~>  // turns label field into an array
  TermCounter() ~>                        // collect label counts
  TermMinimumDocumentCountFilter(0)       // filter labels in < 1 docs
}

val dataset = LabeledLDADataset(text, labels);

// define the model parameters
val modelParams = LabeledLDAModelParams(dataset);

// Name of the output model folder to generate
val modelPath = file("TMT Snapshots");

// Trains the model, writing to the given output path
TrainCVB0LabeledLDA(modelParams, dataset, output = modelPath, maxIterations = 1000);
f-loris/scio-idea-plugin
src/main/scala/com/spotify/scio/ScioInjector.scala
/* * Copyright 2016 Spotify AB. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.spotify.scio import java.nio.charset.Charset import java.nio.file.{Path, Paths} import com.google.common.base.Charsets import com.google.common.hash.Hashing import com.google.common.io.Files import com.intellij.openapi.diagnostic.Logger import com.intellij.psi.PsiElement import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScTypeDefinition} import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.SyntheticMembersInjector import scala.collection.mutable class ScioInjector extends SyntheticMembersInjector { private val logger = Logger.getInstance(classOf[ScioInjector]) // Could not find a way to get fully qualified annotation names // even tho there is API, it does not return the annotations. // For now stick with relative annotation names. private val BQTNamespace = "BigQueryType" private val fromQuery = s"$BQTNamespace.fromQuery" private val fromTable = s"$BQTNamespace.fromTable" private val annotations = Seq(fromQuery, fromTable, s"$BQTNamespace.fromSchema", s"$BQTNamespace.toTable") private val AvroTNamespace = "AvroType" private val avroAnnotations = Seq(s"$AvroTNamespace.fromSchema", s"$AvroTNamespace.fromPath", s"$AvroTNamespace.toSchema") private val alertEveryMissedXInvocations = 5 private val classMissed = mutable.HashMap.empty[String, Int].withDefaultValue(0) /** * Finds BigQuery cache directory, must be in sync with Scio implementation, otherwise plugin will * not be able to find scala files. */ private def getBQClassCacheDir: Path = { //TODO: add this as key/value settings with default etc if (sys.props("bigquery.class.cache.directory") != null) { Paths.get(sys.props("bigquery.class.cache.directory")) } else { Paths.get(sys.props("java.io.tmpdir")).resolve("bigquery-classes") } } private def findClassFile(fileName: String): Option[java.io.File] = { val classFile = getBQClassCacheDir.resolve(s"$fileName").toFile val classFilePath = classFile.getAbsolutePath if (classFile.exists()) { logger.debug(s"Found $classFilePath") classMissed(fileName) = 0 Some(classFile) } else { classMissed(fileName) += 1 val errorMessage = s"""|Scio plugin could not find scala files for code completion. Please (re)compile the project. |Missing: $classFilePath""".stripMargin if(classMissed(fileName) >= alertEveryMissedXInvocations) { // reset counter classMissed(fileName) = 0 logger.error(errorMessage) } logger.warn(errorMessage) None } } /** * Computes hash for macro - the hash must be consistent with hash implementation in Scio. */ private def genHashForMacro(owner: String, srcFile: String): String = { Hashing.murmur3_32().newHasher() .putString(owner, Charsets.UTF_8) .putString(srcFile, Charsets.UTF_8) .hash().toString } /** * Main method of the plugin. Injects syntactic inner members like case classes and companion * objects, makes IntelliJ happy about BigQuery macros. Assumes macro is enclosed within * class/object. 
*/ override def injectInners(source: ScTypeDefinition): Seq[String] = { source.members.flatMap { case c: ScClass if c.annotations.map(_.getText).exists(t => annotations.exists(t.contains)) => val caseClasses = fetchGeneratedCaseClasses(source, c) val extraCompanionMethod = fetchExtraBQTypeCompanionMethods(source, c) val tupledMethod = getTupledMethod(c.getName, caseClasses) val applyPropsSignature = getApplyPropsSignature(caseClasses) val unapplyReturnTypes = getUnapplyReturnTypes(caseClasses).mkString(" , ") // TODO: missing extends and traits - are they needed? // $tn extends ${p(c, SType)}.HasSchema[$name] with ..$traits val companion = s"""|object ${c.getName} { | def apply( $applyPropsSignature ): ${c.getName} = ??? | def unapply(x$$0: ${c.getName}): _root_.scala.Option[($unapplyReturnTypes)] = ??? | def fromTableRow: _root_.scala.Function1[_root_.com.google.api.services.bigquery.model.TableRow, ${c.getName} ] = ??? | def toTableRow: _root_.scala.Function1[ ${c.getName}, _root_.com.google.api.services.bigquery.model.TableRow] = ??? | def schema: _root_.com.google.api.services.bigquery.model.TableSchema = ??? | def toPrettyString(indent: Int = 0): String = ??? | $extraCompanionMethod | $tupledMethod |}""".stripMargin if (caseClasses.isEmpty) { Seq.empty } else { caseClasses ++ Seq(companion) } case c: ScClass if c.annotations.map(_.getText).exists(t => avroAnnotations.exists(t.contains)) => val caseClasses = fetchGeneratedCaseClasses(source, c) val tupledMethod = getTupledMethod(c.getName, caseClasses) val applyPropsSignature = getApplyPropsSignature(caseClasses) val unapplyReturnTypes = getUnapplyReturnTypes(caseClasses).mkString(" , ") val companion = s"""|object ${c.getName} { | def apply( $applyPropsSignature ): ${c.getName} = ??? | def unapply(x$$0: ${c.getName}): _root_.scala.Option[($unapplyReturnTypes)] = ??? | def fromGenericRecord: _root_.scala.Function1[_root_.org.apache.avro.generic.GenericRecord, ${c.getName} ] = ??? | def toGenericRecord: _root_.scala.Function1[ ${c.getName}, _root_.org.apache.avro.generic.GenericRecord] = ??? | def schema: _root_.org.apache.avro.Schema = ??? | def toPrettyString(indent: Int = 0): String = ??? | $tupledMethod |}""".stripMargin if (caseClasses.isEmpty) { Seq.empty } else { caseClasses ++ Seq(companion) } case _ => Seq.empty } } private def getApplyPropsSignature(caseClasses: Seq[String]) = { getConstructorProps(caseClasses).map(_.props) .getOrElse(Seq.empty) .mkString(" , ") } private def fetchExtraBQTypeCompanionMethods(source: ScTypeDefinition, c: ScClass) = { val annotation = c.annotations.map(_.getText).find(t => annotations.exists(t.contains)).get logger.debug(s"Found $annotation in ${source.getTruncedQualifiedName}") val extraCompanionMethod = annotation match { case a if a.contains(fromQuery) => "def query: _root_.java.lang.String = ???" case a if a.contains(fromTable) => "def table: _root_.java.lang.String = ???" case _ => "" } extraCompanionMethod } private def fetchGeneratedCaseClasses(source: ScTypeDefinition, c: ScClass) = { // For some reason sometimes [[getVirtualFile]] returns null, use Option. I don't know why. 
val fileName = Option(c.asInstanceOf[PsiElement].getContainingFile.getVirtualFile) .map(_.getCanonicalPath) val hash = fileName.map(genHashForMacro(source.getTruncedQualifiedName, _)) hash.flatMap(h => findClassFile(s"${c.getName}-$h.scala")).map(f => { import collection.JavaConverters._ Files.readLines(f, Charset.defaultCharset()).asScala.filter(_.contains("case class")) }).getOrElse(Seq.empty) } private def getConstructorProps(caseClasses: Seq[String]): Option[ConstructorProps] = { // TODO: duh. who needs regex ... but seriously tho, should this be regex? caseClasses .find(c => c.contains("extends _root_.com.spotify.scio.bigquery.types.BigQueryType.HasAnnotation") || c.contains("extends _root_.com.spotify.scio.avro.types.AvroType.HasAvroAnnotation")) .map( _.split("[()]").filter(_.contains(" : ")) // get only parameter part .flatMap(propsStr => { val propsSplit = propsStr.split(",") // We need to fix the split since Map types contain ',' as a part of their type declaration val props = mutable.ArrayStack[String]() for (prop <- propsSplit) { if (prop.contains(" : ")) { props += prop } else { assume(props.nonEmpty) props += props.pop() + "," + prop } } props.result.toList })).map(ConstructorProps(_)) // get individual parameter } private[scio] def getUnapplyReturnTypes(caseClasses: Seq[String]): Seq[String] = { getConstructorProps(caseClasses).map(_.types).getOrElse(Seq.empty) } private[scio] def getTupledMethod(returnClassName: String, caseClasses: Seq[String]): String = { val maybeTupledMethod = getConstructorProps(caseClasses).map { case cp: ConstructorProps if (2 to 22).contains(cp.types.size) => s"def tupled: _root_.scala.Function1[( ${cp.types.mkString(" , ")} ), $returnClassName ] = ???" case _ => "" } maybeTupledMethod.getOrElse("") } case class ConstructorProps(props: Seq[String]) { val types: Seq[String] = props.map(_.split(" : ")(1).trim) } }
guizmaii/BooleanDsl
src/main/scala/com/guizmaii/boolean/dsl/BooleanDslV1.scala
<reponame>guizmaii/BooleanDsl package com.guizmaii.boolean.dsl sealed trait BooleanDslV1 { def unary_! : BooleanDslV1 final def &&(that: BooleanDslV1): BooleanDslV1 = BooleanDslV1.And(this, that) final def ||(that: BooleanDslV1): BooleanDslV1 = BooleanDslV1.Or(this, that) final def not: BooleanDslV1 = !this } object BooleanDslV1 { sealed trait Unary extends BooleanDslV1 private[dsl] final case class Pure(x: () => Boolean) extends Unary { override def unary_! : BooleanDslV1 = Not(x) } private[dsl] final case class Not(x: () => Boolean) extends Unary { override def unary_! : BooleanDslV1 = Pure(x) } sealed trait Binary extends BooleanDslV1 private[dsl] final case class And(x: BooleanDslV1, y: BooleanDslV1) extends Binary { override def unary_! : BooleanDslV1 = Nand(x, y) } private[dsl] final case class Nand(x: BooleanDslV1, y: BooleanDslV1) extends Binary { override def unary_! : BooleanDslV1 = And(x, y) } private[dsl] final case class Or(x: BooleanDslV1, y: BooleanDslV1) extends Binary { override def unary_! : BooleanDslV1 = Nor(x, y) } private[dsl] final case class Nor(x: BooleanDslV1, y: BooleanDslV1) extends Binary { override def unary_! : BooleanDslV1 = Or(x, y) } def pure(x: => Boolean): BooleanDslV1 = Pure(() => x) def interpret(exp: BooleanDslV1): Boolean = exp match { case Pure(x) => x() case Not(x) => !x() case And(Pure(x), Pure(y)) => x() && y() case And(Pure(x), Not(y)) => x() && !y() case And(Not(x), Pure(y)) => !x() && y() case And(Not(x), Not(y)) => !x() && !y() case And(Pure(x), And(y, z)) => x() && interpret(y) && interpret(z) case And(Pure(x), Nand(y, z)) => x() && !(interpret(y) && interpret(z)) case And(Pure(x), Or(y, z)) => x() && (interpret(y) || interpret(z)) case And(Pure(x), Nor(y, z)) => x() && !(interpret(y) || interpret(z)) case And(Not(x), And(y, z)) => !x() && (interpret(y) && interpret(z)) case And(Not(x), Nand(y, z)) => !x() && !(interpret(y) && interpret(z)) case And(Not(x), Or(y, z)) => !x() && (interpret(y) || interpret(z)) case And(Not(x), Nor(y, z)) => !x() && !(interpret(y) || interpret(z)) case Nand(Pure(x), Pure(y)) => !(x() && y()) case Nand(Pure(x), Not(y)) => !(x() && !y()) case Nand(Not(x), Pure(y)) => !(!x() && y()) case Nand(Not(x), Not(y)) => x() || y() case Nand(Pure(x), And(y, z)) => !(x() && (interpret(y) && interpret(z))) case Nand(Pure(x), Nand(y, z)) => !(x() && !(interpret(y) && interpret(z))) case Nand(Pure(x), Or(y, z)) => !(x() && (interpret(y) || interpret(z))) case Nand(Pure(x), Nor(y, z)) => !(x() && !(interpret(y) || interpret(z))) case Nand(Not(x), And(y, z)) => !(!x() && (interpret(y) && interpret(z))) case Nand(Not(x), Nand(y, z)) => x() || (interpret(y) && interpret(z)) case Nand(Not(x), Or(y, z)) => !(!x() && (interpret(y) || interpret(z))) case Nand(Not(x), Nor(y, z)) => x() || interpret(y) || interpret(z) case Or(Pure(x), Pure(y)) => x() || y() case Or(Pure(x), Not(y)) => x() || !y() case Or(Not(x), Pure(y)) => !x() || y() case Or(Not(x), Not(y)) => !x() || !y() case Or(Pure(x), And(y, z)) => x() || (interpret(y) && interpret(z)) case Or(Pure(x), Nand(y, z)) => x() || !(interpret(y) && interpret(z)) case Or(Pure(x), Or(y, z)) => x() || (interpret(y) || interpret(z)) case Or(Pure(x), Nor(y, z)) => x() || !(interpret(y) || interpret(z)) case Or(Not(x), And(y, z)) => !x() || (interpret(y) && interpret(z)) case Or(Not(x), Nand(y, z)) => !x() || !(interpret(y) && interpret(z)) case Or(Not(x), Or(y, z)) => !x() || (interpret(y) || interpret(z)) case Or(Not(x), Nor(y, z)) => !x() || !(interpret(y) || interpret(z)) case Nor(Pure(x), 
Pure(y)) => !(x() || y()) case Nor(Pure(x), Not(y)) => !(x() || !y()) case Nor(Not(x), Pure(y)) => !(!x() || y()) case Nor(Not(x), Not(y)) => x() && y() case Nor(Pure(x), And(y, z)) => !(x() || (interpret(y) && interpret(z))) case Nor(Pure(x), Nand(y, z)) => !(x() || !(interpret(y) && interpret(z))) case Nor(Pure(x), Or(y, z)) => !(x() || (interpret(y) || interpret(z))) case Nor(Pure(x), Nor(y, z)) => !(x() || !(interpret(y) || interpret(z))) case Nor(Not(x), And(y, z)) => !(!x() || (interpret(y) && interpret(z))) case Nor(Not(x), Nand(y, z)) => x() && interpret(y) && interpret(z) case Nor(Not(x), Or(y, z)) => !(!x() || (interpret(y) || interpret(z))) case Nor(Not(x), Nor(y, z)) => x() && (interpret(y) || interpret(z)) case And(x, y) => interpret(x) && interpret(y) case Nand(x, y) => !(interpret(x) && interpret(y)) case Or(x, y) => interpret(x) || interpret(y) case Nor(x, y) => !(interpret(x) || interpret(y)) } }
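A minimal usage sketch of the BooleanDslV1 API above (pure, &&, ||, unary_! and interpret are taken from the source; the object name and value names here are illustrative and not part of the repository):

import com.guizmaii.boolean.dsl.BooleanDslV1
import com.guizmaii.boolean.dsl.BooleanDslV1.{interpret, pure}

object BooleanDslV1Example extends App {
  // Leaves are stored as thunks: nothing is evaluated until `interpret` walks the expression tree
  val isAdmin   = pure { println("checking admin"); true }
  val isBanned  = pure { println("checking banned"); false }
  val hasInvite = pure(true)

  // Composite expressions are built with &&, || and ! and evaluated by the interpreter
  val canEnter = (isAdmin || hasInvite) && !isBanned

  println(interpret(canEnter)) // prints true
}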
guizmaii/BooleanDsl
build.sbt
name := "BooleanDsl"

version := "0.1"

scalaVersion := "2.13.6"

libraryDependencies ++= Seq(
  "org.scalatest"     %% "scalatest"       % "3.2.3"   % Test,
  "org.scalatestplus" %% "scalacheck-1-14" % "3.2.2.0" % Test,
  "org.scalacheck"    %% "scalacheck"      % "1.15.1"  % Test
)
olka/stanoq
src/test/scala/org/stanoq/tests/crawler/ServiceNegativeSpec.scala
package org.stanoq.tests.crawler

import akka.http.scaladsl.server.ValidationRejection
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest._
import org.stanoq.crawler.CrawlerService
import org.stanoq.crawler.model.{ConfigProperties, CrawlerProtocols}

class ServiceNegativeSpec extends FlatSpec with Matchers with ScalatestRouteTest with CrawlerProtocols {
  val crawlerService = new CrawlerService

  "org.stanoq.Service" should "respond with ValidationRejection on empty url" in {
    Post(s"/crawler", ConfigProperties("", 4)) ~> crawlerService.route ~> check {
      rejection === ValidationRejection("Cashflow entity has wrong structure!", None)
    }
  }

  "org.stanoq.Service" should "respond with ValidationRejection on null url" in {
    val exception = intercept[java.lang.IllegalArgumentException] {
      Post(s"/crawler", ConfigProperties(null, 4)) ~> crawlerService.route ~> check {}
    }
    exception.getMessage shouldBe "requirement failed: Config wasn't properly set!"
  }

  "org.stanoq.Service" should "respond with ValidationRejection on negative depthLimit" in {
    Post(s"/crawler", ConfigProperties("https://www.websocket.org/echo.html", -2)) ~> crawlerService.route ~> check {
      rejection === ValidationRejection("Cashflow entity has wrong structure!", None)
    }
  }

  "org.stanoq.Service" should "respond with ValidationRejection on negative timeout" in {
    Post(s"/crawler", ConfigProperties("https://www.websocket.org/echo.html", 2, -4)) ~> crawlerService.route ~> check {
      rejection === ValidationRejection("Cashflow entity has wrong structure!", None)
    }
  }

  "org.stanoq.Service" should "respond with ValidationRejection on bad url" in {
    Post(s"/crawler", ConfigProperties("https://www.co.hml", 2)) ~> crawlerService.route ~> check {
      rejection === ValidationRejection("Cashflow entity has wrong structure!", None)
    }
  }
}
olka/stanoq
src/test/scala/org/stanoq/load/tests/CrawlerLoadTest.scala
package org.stanoq.load.tests

import io.gatling.core.Predef._
import io.gatling.http.Predef._

import scala.concurrent.duration._

class CrawlerLoadTest extends Simulation {

  val httpConf = http
    .baseURL("http://localhost:9000")
    .acceptHeader("application/json,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
    .doNotTrackHeader("1")
    .acceptLanguageHeader("en-US,en;q=0.5")
    .acceptEncodingHeader("gzip, deflate")
    .userAgentHeader("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0")

  val scn = scenario("Crawler simulation")
    .exec(http("Crawler test")
      .post("/crawler").body(RawFileBody("config.json")).asJSON.check(status.is(200))).pause(5)

  setUp(scn.inject(rampUsersPerSec(30) to 30 during(30 seconds))).protocols(httpConf)
}
olka/stanoq
src/test/scala/org/stanoq/tests/crawler/CrawlerSpec.scala
package org.stanoq.tests.crawler

import org.scalatest._
import org.stanoq.crawler.Crawler
import spray.json._
import org.stanoq.crawler.model.{ConfigProperties, Node}

class CrawlerSpec extends FlatSpec with Matchers {

  "Crawler" should "handle recursive page structure" in {
    val crawler = new Crawler(ConfigProperties("https://www.websocket.org/echo.html", 2)).process()
    crawler.visitedPages.filter(url => crawler.root.print.contains(url)).size shouldBe crawler.root.convertToNode.getChildCount - 1
  }

  "Crawler" should "properly process gatling.io" in {
    val crawler = new Crawler(ConfigProperties("http://gatling.io", 1)).process()
    crawler.root.convertToNode.getChildCount - 1 shouldBe crawler.visitedPages.size
    crawler.visitedPages.size should be >= 1
  }

  "Crawler" should "handle wrong site page" in {
    val crawler = new Crawler(ConfigProperties("http://gatl.tt", 1)).process()
    crawler.root.convertToNode.getChildCount - 1 shouldBe crawler.visitedPages.size
  }
}
olka/stanoq
build.sbt
name := "stanoq"
organization := "com.stanoq"
version := "1.0"
scalaVersion := "2.11.8"
scalacOptions := Seq("-unchecked", "-deprecation", "-encoding", "utf8")

enablePlugins(JavaAppPackaging)
enablePlugins(GatlingPlugin)

libraryDependencies ++= {
  val akkaV = "2.4.16"
  val akkaHttpV = "10.0.9"
  val scalaTestV = "3.0.1"
  Seq(
    "com.typesafe.akka" %% "akka-actor" % akkaV,
    "com.typesafe.akka" %% "akka-stream" % akkaV,
    "com.typesafe.akka" %% "akka-testkit" % akkaV,
    "com.typesafe.akka" %% "akka-http" % akkaHttpV,
    "com.typesafe.akka" %% "akka-http-spray-json" % akkaHttpV,
    "com.typesafe.akka" %% "akka-http-testkit" % akkaHttpV,
    "org.mongodb.scala" %% "mongo-scala-driver" % "2.0.0",
    "org.scalatest" %% "scalatest" % scalaTestV % "test",
    "io.gatling.highcharts" % "gatling-charts-highcharts" % "2.2.2" % "test",
    "io.gatling" % "gatling-test-framework" % "2.2.2" % "test",
    "com.pauldijou" %% "jwt-core" % "0.14.0",
    "org.jsoup" % "jsoup" % "1.10.3"
  )
}

coverageMinimum := 85
coverageFailOnMinimum := true
parallelExecution in Test := true
coverageExcludedPackages := "org.stanoq.CorsSupport.*;org.stanoq.RestController.*"
olka/stanoq
src/main/scala/org/stanoq/crawler/MongoHelper.scala
<gh_stars>1-10
package org.stanoq.crawler

import java.util.concurrent.TimeUnit

import com.typesafe.config.ConfigFactory
import org.mongodb.scala.{MongoClient, MongoCollection}
import org.stanoq.crawler.model._
import org.mongodb.scala.bson.codecs.Macros._
import org.mongodb.scala.bson.codecs.DEFAULT_CODEC_REGISTRY
import org.bson.codecs.configuration.CodecRegistries.{fromProviders, fromRegistries}
import org.mongodb.scala.bson.conversions.Bson
import org.mongodb.scala.model.Filters._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object MongoHelper {
  val config = ConfigFactory.load()
  val rawUrl = config.getString("mongo.url")
  val url = rawUrl.substring(0, rawUrl.lastIndexOf("/") + 1)
  val databaseName = rawUrl.substring(rawUrl.lastIndexOf("/") + 1)

  val mongoClient: MongoClient = MongoClient(rawUrl)
  val responseRegistry = fromRegistries(fromProviders(
    classOf[CrawlerResponse], classOf[ConfigProperties], classOf[Node],
    classOf[EchartResponse], classOf[EchartNode], classOf[EchartLink]), DEFAULT_CODEC_REGISTRY)
  val database = mongoClient.getDatabase(databaseName).withCodecRegistry(responseRegistry)
  val collection: MongoCollection[CrawlerResponse] = database.getCollection("crawler")

  def filterResponse(config: ConfigProperties): Bson =
    and(equal("config.url", config.url), equal("config.depthLimit", config.depthLimit))

  def size = Await.result(collection.count().head(), Duration(10, TimeUnit.SECONDS)).toInt

  def getLatest: List[CrawlerResponse] =
    Await.result(collection.find().skip(size - 1).toFuture(), Duration(10, TimeUnit.SECONDS)).toList

  def getAll(limit: Int) =
    Await.result(collection.find().limit(limit).toFuture(), Duration(10, TimeUnit.SECONDS)).toList

  def getResponse(url: String) =
    Await.result(collection.find(equal("config.url", url)).toFuture(), Duration(10, TimeUnit.SECONDS))

  def persist(response: CrawlerResponse) =
    Await.result(collection.insertOne(response).head(), Duration(10, TimeUnit.SECONDS))

  def deleteSite(config: ConfigProperties) =
    Await.result(collection.deleteOne(filterResponse(config)).head(), Duration(10, TimeUnit.SECONDS))

  def deleteAll() = Await.result(collection.drop().head(), Duration(10, TimeUnit.SECONDS))
}
olka/stanoq
src/test/scala/org/stanoq/load/tests/GatlingRunner.scala
<filename>src/test/scala/org/stanoq/load/tests/GatlingRunner.scala
package org.stanoq.load.tests

import io.gatling.app.Gatling
import io.gatling.core.config.GatlingPropertiesBuilder

object GatlingRunner extends App {
  val props = new GatlingPropertiesBuilder
  props.simulationClass(classOf[CrawlerLoadTest].getCanonicalName)
  Gatling.fromMap(props.build)
}
olka/stanoq
src/main/scala/org/stanoq/crawler/Crawler.scala
<reponame>olka/stanoq
package org.stanoq.crawler

import java.util.Collections
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import akka.actor.ActorSystem
import akka.event.Logging
import org.jsoup.nodes.Document
import org.jsoup.{Connection, HttpStatusException, Jsoup}
import org.stanoq.crawler.model.{ConfigProperties, Page}

import scala.collection.JavaConverters._
import scala.util.Try

class Crawler(config: ConfigProperties) {
  val logger = Logging(ActorSystem(), getClass)
  val visitedPages = createSet[String]
  private val domain: String = config.getDomain
  val root: Page = new Page(domain, domain, 0, 0, 0, createSet[Page])

  def process(url: String = "") = {
    logger.info("Processing " + config.url + url)
    crawl(config.url + url, 1, root)
    root.statusCode = 200
    this
  }

  private def crawl(url: String, depth: Int, prev: Page) {
    if (visitedPages.contains(url.substring(0, url.length - 1)) || !visitedPages.add(url)) return
    val (page, links) = getPage(url, prev)
    if (page.statusCode != 200) return
    prev.addChild(page)
    logger.info(url + " " + links.size)
    if (depth > config.depthLimit) return
    links.par.foreach(link => crawl(link, depth + 1, page))
  }

  private def createSet[T] = Collections.newSetFromMap(new ConcurrentHashMap[T, java.lang.Boolean]).asScala

  private def getPage(url: String, prev: Page): (Page, List[String]) = {
    def parseLinksToVisit(doc: Document): List[String] = {
      def predicate(l: String) = (!(l.trim.length < 7 || l.startsWith("mailto"))) && l.contains(domain)
      doc.select("a").iterator().asScala.toStream.map(_.attr("abs:href")).filter(predicate).toList
    }

    def getDocument(con: Connection): (Page, List[String]) = {
      val time = System.nanoTime()
      val res = con.execute()
      val pageSize = res.bodyAsBytes().size
      val timeToLoad = TimeUnit.MILLISECONDS.convert(System.nanoTime() - time, TimeUnit.NANOSECONDS)
      val doc = res.parse()
      // parse the links once and reuse the result in the returned tuple
      val links = parseLinksToVisit(doc)
      (new Page(url, doc.title(), 200, timeToLoad, pageSize, createSet[Page]), links)
    }

    (Try(Jsoup.connect(url).userAgent("Mozilla/5.0").timeout(30 * 1000)).map(getDocument).recover {
      case e: HttpStatusException =>
        logger.error(e.getStatusCode + " :: on " + url)
        val errPage = new Page(url, e.getMessage, e.getStatusCode, 4040, 9000, createSet[Page])
        prev.addChild(errPage)
        (errPage, List())
      case e: Exception =>
        logger.error(e.getMessage + " :: on " + url)
        val errPage = new Page(url, e.getMessage, 500, 5000, 9000, createSet[Page])
        prev.addChild(errPage)
        (errPage, List())
    }).get
  }
}
olka/stanoq
src/test/scala/org/stanoq/tests/crawler/ServiceSpec.scala
<filename>src/test/scala/org/stanoq/tests/crawler/ServiceSpec.scala package org.stanoq.tests.crawler import akka.event.NoLogging import akka.http.scaladsl.model.ContentTypes._ import akka.http.scaladsl.model.StatusCodes._ import akka.http.scaladsl.testkit.{RouteTestTimeout, ScalatestRouteTest} import org.scalatest._ import org.scalatest.concurrent.{Eventually, IntegrationPatience} import org.stanoq.crawler.{CrawlerService, MongoHelper, StreamService} import org.stanoq.crawler.model._ import spray.json._ import scala.concurrent.duration._ import scala.io.Source class ServiceSpec extends AsyncFlatSpec with Matchers with ScalatestRouteTest with CrawlerProtocols with Eventually with IntegrationPatience with BeforeAndAfterAll{ override def testConfigSource = "akka.loglevel = DEBUG" def config = testConfig val logger = NoLogging implicit val timeout = RouteTestTimeout(5.seconds) val crawlerService = new CrawlerService val streamService = new StreamService val configJson = Source.fromFile("config.json").mkString val configProps = ConfigProperties("https://www.websocket.org/index.html",3) override def afterAll() = { println("After!") // shut down the web server MongoHelper.deleteSite(configProps) MongoHelper.deleteAll() } "CrawlerService" should "respond with 20 processed pages on crawling websocket.org with depth 3" in { Post(s"/crawler", configProps) ~> crawlerService.route ~> check { status shouldBe OK contentType shouldBe `application/json` responseAs[Node].getChildCount should be >=2 } } "CrawlerService" should "handle crawlerStream endpoint properly" in { Post(s"/crawlerStream", configJson.parseJson.convertTo[ConfigProperties]) ~> streamService.route ~> check { status shouldBe OK contentType shouldBe `application/json` // eventually { // responseAs[Seq[CrawlerResponse]].size should be > 0 // responseAs[Seq[CrawlerResponse]].head.node.value should include("websocket") // } } } "CrawlerService" should "getAll response" in { Get(s"/sites") ~> crawlerService.route ~> check { status shouldBe OK contentType shouldBe `application/json` responseAs[Seq[CrawlerResponse]].head.node.value shouldBe "websocket.org Echo Test - Powered by Kaazing : 200" } } "CrawlerService" should "getLatest response" in { Get(s"/site") ~> crawlerService.route ~> check { status shouldBe OK contentType shouldBe `application/json` responseAs[Seq[CrawlerResponse]].head.node.value should include("websocket") responseAs[Seq[CrawlerResponse]].head.config.url shouldBe "https://www.websocket.org/echo.html" } } // "CrawlerService" should "get particular node" in { // Get(s"/node?value=test") ~> crawlerService.route ~> check { // status shouldBe OK // contentType shouldBe `application/json` // responseAs[Seq[Node]].head.value shouldBe "test" // } // } // "CrawlerService" should "delete node" in { // Delete(s"/node", Node("test",None,0)) ~> crawlerService.route ~> check { // status shouldBe Gone // } // Get(s"/nodes") ~> crawlerService.route ~> check { // status shouldBe OK // contentType shouldBe `application/json` // responseAs[Seq[Node]].size shouldBe 0 // } }
olka/stanoq
src/main/scala/org/stanoq/version/VersionService.scala
<reponame>olka/stanoq<gh_stars>1-10
package org.stanoq.version
package org.stanoq.crawler

import akka.http.scaladsl.model.{HttpEntity, _}
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.ConfigFactory

class VersionService {
  val version = ConfigFactory.load().getString("stanoq.version")

  val route = pathPrefix("version") {
    pathEnd {
      get {
        complete(HttpResponse(StatusCodes.OK,
          entity = HttpEntity(ContentType(MediaTypes.`application/json`), s"""{"version": $version}""")))
      }
    }
  }
}
olka/stanoq
project/plugins.sbt
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.3")

addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.2.0-M7")

addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.5.0")

addSbtPlugin("io.gatling" % "gatling-sbt" % "2.2.0")

addSbtPlugin("org.scoverage" % "sbt-coveralls" % "1.1.0")
olka/stanoq
src/test/scala/org/stanoq/load/tests/VersionLoadTest.scala
<reponame>olka/stanoq<filename>src/test/scala/org/stanoq/load/tests/VersionLoadTest.scala
package org.stanoq.load.tests

import io.gatling.core.Predef._
import io.gatling.http.Predef._

import scala.concurrent.duration._

class VersionLoadTest extends Simulation {
  val httpConf = http.baseURL("http://stanoq.herokuapp.com").doNotTrackHeader("1")

  val scn = scenario("Version simulation")
    .exec(http("Version test").get("/version").check(status.is(200)))

  setUp(scn.inject(rampUsersPerSec(50) to 350 during(45 seconds))).protocols(httpConf)
}
olka/stanoq
src/main/scala/org/stanoq/CorsSupport.scala
package org.stanoq

import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.{StatusCodes, HttpResponse}
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive0, Route}

trait CorsSupport {

  //this directive adds access control headers to normal responses
  private def addAccessControlHeaders: Directive0 = {
    respondWithHeaders(
      `Access-Control-Allow-Origin`.*,
      `Access-Control-Allow-Credentials`(true),
      `Access-Control-Allow-Headers`("Origin", "Authorization", "Content-Type", "X-Requested-With")
    )
  }

  //this handles preflight OPTIONS requests.
  private def preflightRequestHandler: Route = options {
    complete(HttpResponse(StatusCodes.OK).withHeaders(`Access-Control-Allow-Methods`(OPTIONS, POST, PUT, GET, DELETE)))
  }

  def corsHandler(r: Route) = addAccessControlHeaders {
    preflightRequestHandler ~ r
  }
}
olka/stanoq
src/test/scala/org/stanoq/tests/VersionSpec.scala
<reponame>olka/stanoq<filename>src/test/scala/org/stanoq/tests/VersionSpec.scala
package org.stanoq.tests

import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest._
import org.stanoq.version.org.stanoq.crawler.VersionService

class VersionSpec extends FlatSpec with Matchers with ScalatestRouteTest {
  val versionService = new VersionService

  "Version service" should "respond with proper service version" in {
    Get("/version") ~> versionService.route ~> check {
      status shouldBe OK
      contentType shouldBe `application/json`
      entityAs[String] shouldBe s"""{"version": 0.1}"""
    }
  }
}
olka/stanoq
src/main/scala/org/stanoq/RestController.scala
package org.stanoq

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.directives.DebuggingDirectives
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.stanoq.crawler.{CrawlerService, StreamService}
import org.stanoq.version.org.stanoq.crawler.VersionService

object RestController extends App with CorsSupport {
  implicit val system = ActorSystem()
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  val log = system.log
  val config = ConfigFactory.load()
  val logger = Logging(system, getClass)

  val crawlerService = new CrawlerService
  val versionService = new VersionService
  val streamService = new StreamService

  val angularRoute = pathPrefix("") {getFromResourceDirectory("webapp/dist") ~ getFromResource("webapp/dist/index.html")}
  val debug = pathPrefix("debug") { getFromBrowseableDirectories(".")}
  val routes = crawlerService.route ~ versionService.route ~ streamService.route ~ angularRoute ~ debug
  val loggedRoutes = DebuggingDirectives.logRequestResult("INFO:", Logging.InfoLevel)(routes)

  val bindingFuture = Http().bindAndHandle(corsHandler(routes), config.getString("http.interface"), config.getInt("http.port"))
  bindingFuture.map(_.localAddress).map(addr => s"Bound to $addr").foreach(log.info)

  sys.addShutdownHook(system.terminate())
}
olka/stanoq
src/main/scala/org/stanoq/auth/JwtAuth.scala
<filename>src/main/scala/org/stanoq/auth/JwtAuth.scala
package org.stanoq.auth

import pdi.jwt.{Jwt, JwtAlgorithm}

/**
  * Jwt helper class
  * @param payload @Json object converted to @String
  * @param secret Combination of url and depth. This is UUID of crawling result
  */
case class JwtAuth(payload: String, secret: String) {
  def decode = Jwt.decodeRawAll(payload, secret, Seq(JwtAlgorithm.HS256))
  def encode = Jwt.encode(payload, secret, JwtAlgorithm.HS256)
}
olka/stanoq
src/main/scala/org/stanoq/crawler/StreamService.scala
<reponame>olka/stanoq
package org.stanoq.crawler

import akka.actor.ActorSystem
import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
import akka.http.scaladsl.server.Directives._
import akka.stream.ThrottleMode
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.scaladsl.{Flow, Source}
import org.stanoq.crawler.model._
import spray.json._
import MongoHelper._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class StreamService extends CrawlerProtocols {
  implicit val blockingDispatcher: ExecutionContext = ActorSystem().dispatchers.lookup("blocking-dispatcher")
  implicit val jsonStreamingSupport: JsonEntityStreamingSupport =
    EntityStreamingSupport.json().withParallelMarshalling(parallelism = 8, unordered = false)

  def getResponse(config: ConfigProperties) = {
    val crawler = new Crawler(config)
    Future {crawler.process("/")}
    def pageRoot = crawler.root
    def properRoot = if (pageRoot.children.size > 0) pageRoot.children.head else pageRoot
    def echartRoot = properRoot.parse
    def node = properRoot.convertToNode

    val source = {
      def response = CrawlerResponse(node, EchartResponse(echartRoot.map(_._1), echartRoot.flatMap(_._2)), config)
      def next(node: CrawlerResponse) = {
        if (node == null) {persist(response); None}
        else if (pageRoot.statusCode == 200) {println(response.toJson.toString); Some((null, response))}
        else Some((response, response))
      }
      Source.unfold(response)(next).withAttributes(DefaultAttributes.buffer)
    }
    encodeResponse(complete(source.via(getThrottlingFlow[CrawlerResponse])))
  }

  def getThrottlingFlow[T] = Flow[T].throttle(1, per = 450.millis, 1, ThrottleMode.shaping)

  val route = pathPrefix("crawlerStream") {
    pathEnd {
      (post & entity(as[ConfigProperties])) (getResponse)
    }
  }
}
olka/stanoq
src/main/scala/org/stanoq/crawler/CrawlerService.scala
package org.stanoq.crawler

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import org.stanoq.crawler.model._
import spray.json._

import scala.concurrent._
import scala.concurrent.Future

class CrawlerService() extends CrawlerProtocols {
  implicit val blockingDispatcher: ExecutionContext = ActorSystem().dispatchers.lookup("blocking-dispatcher")

  def handleCrawlerRequest(config: ConfigProperties) = {
    validate(config.validate, "Config wasn't properly set!") {
      complete {
        val crawler = new Crawler(config).process("/")
        Future {
          val root: Node = crawler.root.convertToNode
          val crawlerEntity = HttpEntity(ContentType(MediaTypes.`application/json`), root.toJson.toString())
          HttpResponse(StatusCodes.OK, entity = crawlerEntity)
        }
      }
    }
  }

  def getAll = complete(HttpResponse(StatusCodes.OK,
    entity = HttpEntity(ContentType(MediaTypes.`application/json`), MongoHelper.getAll(0).toList.toJson.toString())))

  def getLatest = complete(HttpResponse(StatusCodes.OK,
    entity = HttpEntity(ContentType(MediaTypes.`application/json`), MongoHelper.getLatest.toList.toJson.toString())))

  def getPage(url: String) = complete(HttpResponse(StatusCodes.OK,
    entity = HttpEntity(ContentType(MediaTypes.`application/json`), MongoHelper.getResponse(url).toList.toJson.toString())))

  val route =
    pathPrefix("crawler") {pathEnd { (post & entity(as[ConfigProperties])) (handleCrawlerRequest)}} ~
    pathPrefix("site") {pathEnd { get (getLatest)}} ~
    pathPrefix("sites") {pathEnd {(get) (getAll)}}
}
olka/stanoq
src/main/scala/org/stanoq/crawler/model/ConfigProperties.scala
<gh_stars>1-10
package org.stanoq.crawler.model

import java.awt.Color
import java.net.{URI, URL}

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import spray.json.{DefaultJsonProtocol, RootJsonFormat}

import scala.collection.mutable.Set
import scala.util.Try

case class ConfigProperties(url: String, depthLimit: Int, timeout: Long = 5, exclusions: List[String] = List(",")) {
  require(url != null, "Config wasn't properly set!")

  def validate = {
    def validateUrl = Try(new URL(if (url.contains("http")) url else "http://" + url).getContent()).isFailure
    if (depthLimit < 0) false
    else if (timeout < 0) false
    else if (url.equals("")) false
    else if (validateUrl) false
    else true
  }

  def getDomain: String = Try(new URI(url).getHost).get
}

case class Page(url: String, name: String, var statusCode: Int, timeToLoad: Long, size: Int, children: Set[Page]) {
  def addChild(page: Page): Boolean = children.add(page)

  def print: String = url + children.map(_.print).mkString

  def convertToNode: Node = {
    def getChildNodes = if (children.size > 0) Some(children.map(_.convertToNode).toList) else None
    Node(s"$name : $statusCode", getChildNodes, timeToLoad)
  }

  def parse: List[(EchartNode, List[EchartLink])] = {
    def hue = if (timeToLoad > 1500 || statusCode != 200) 0f else (140 - (timeToLoad / 30)) / 100f
    def color = "#" + Integer.toHexString(Color.HSBtoRGB(hue, 1, 0.75f)).substring(2)
    def category = {
      if (timeToLoad > 1000) "red"
      else if (timeToLoad < 500) "green"
      else "yellow"
    }
    def getTuple = (EchartNode(url, timeToLoad, statusCode, color, category, size), children.map(p => EchartLink(url, p.url)).toList)
    getTuple :: children.flatMap(_.parse).toList
  }
}

case class EchartLink(source: String, target: String)
case class EchartNode(url: String, timeToLoad: Long, statusCode: Int, color: String, category: String, size: Long)
case class EchartResponse(nodes: List[EchartNode], links: List[EchartLink])

case class Node(value: String, children: Option[List[Node]], id: Long) {
  def getChildCount: Int = if (children.isEmpty) 1 else 1 + children.get.map(_.getChildCount).sum
}

case class CrawlerResponse(node: Node, echart: EchartResponse, config: ConfigProperties)

trait CrawlerProtocols extends SprayJsonSupport with DefaultJsonProtocol {
  implicit val nodeFormat: RootJsonFormat[Node] = rootFormat(lazyFormat(jsonFormat(Node, "value", "children", "id")))
  implicit val configFormat: RootJsonFormat[ConfigProperties] = jsonFormat4(ConfigProperties.apply)
  implicit val elinkFormat: RootJsonFormat[EchartLink] = jsonFormat2(EchartLink.apply)
  implicit val enodeFormat: RootJsonFormat[EchartNode] = jsonFormat6(EchartNode.apply)
  implicit val echartFormat: RootJsonFormat[EchartResponse] = jsonFormat2(EchartResponse.apply)
  implicit val responseFormat: RootJsonFormat[CrawlerResponse] = jsonFormat3(CrawlerResponse.apply)
}
olka/stanoq
src/test/scala/org/stanoq/tests/crawler/AuthSpec.scala
package org.stanoq.tests.crawler

import org.scalatest._
import org.stanoq.auth.JwtAuth
import spray.json._
import org.stanoq.crawler.model.{ConfigProperties, CrawlerProtocols}
import pdi.jwt.exceptions.JwtValidationException

class AuthSpec extends FlatSpec with Matchers with CrawlerProtocols {
  val config = ConfigProperties("https://www.websocket.org/index.html", 3)
  var jwt = ""

  "Auth" should "be able to encode JWT" in {
    jwt = new JwtAuth(config.toJson.toString, "secret").encode
    jwt.length should be > 0
  }

  "Auth" should "be able to properly decode JWT" in {
    config.toJson.toString shouldBe new JwtAuth(jwt, "secret").decode.get._2
  }

  "Auth" should "should throw validation exception" in {
    assertThrows[JwtValidationException] {
      config.toJson.toString should not be new JwtAuth(jwt, "secret2").decode.get._2
    }
  }
}
Frugghi/TweetSpark
tweet-spark-scala/src/test/scala/com/tommasomadonia/spark/test/WordCountTests.scala
package com.tommasomadonia.spark.test import com.tommasomadonia.spark.Words import org.scalatest.{Matchers, GivenWhenThen, FlatSpec} class WordCountTests extends FlatSpec with JSONSchemaSpec with GivenWhenThen with Matchers { "Empty JSON" should "have no words" in { Given("an empty DataFrame") val dataFrame = dataFrameReader.json("test/empty.json") When("count words") val wordCounts = Words.count(dataFrame, false) Then("word counts should be empty") wordCounts shouldBe empty } "Ignored retweets" should "have no words" in { Given("an empty DataFrame") val dataFrame = dataFrameReader.json("test/retweet.json") When("count words") val wordCounts = Words.count(dataFrame, true) Then("word counts should be empty") wordCounts shouldBe empty } "Retweets" should "be coalesced" in { Given("an empty DataFrame") val dataFrame = dataFrameReader.json("test/retweet.json") When("count words") val wordCounts = Words.count(dataFrame, false).toSet Then("words counted") wordCounts shouldEqual Set( ("#Disney", 1), ("e", 2), ("OpenBionics", 1), ("producono", 1), ("protesi", 1), ("per", 1), ("bambini", 1), ("ispirate", 1), ("a", 1), ("#IronMan", 1), ("#Frozen", 1), ("#StarWars", 1), ("https://t.co/DxxxKxxxxo", 1), ("https://t.co/9xCxxPxxxR", 1) ) } "A tweet" should "ignore the emojis" in { Given("a DataFrame") val dataFrame = dataFrameReader.json("test/emoji.json") When("count words") val wordCounts = Words.count(dataFrame, false).toSet Then("words counted") wordCounts shouldEqual Set( ("Cinema", 1), ("stasera", 1), ("#starwars", 1), ("https://t.co/5xxkexsxxc", 1) ) } "A tweet with wrong indices" should "be counted correctly" in { Given("a DataFrame") val dataFrame = dataFrameReader.json("test/newline.json") When("count words") val wordCounts = Words.count(dataFrame, false).toSet Then("words counted") wordCounts shouldEqual Set( ("Fonte", 1), ("Corriere", 1), ("Fiorentino", 1), ("https://t.co/cxx1xxxxxJ", 1), ("#pittiuomo", 1), ("#firenze", 1), ("#StarWars", 1), ("#ilvolo", 1), ("12", 1), ("01", 1), ("2016", 1) ) } "A collection of tweets" should "be counted" in { Given("a DataFrame") val dataFrame = dataFrameReader.json("test/random.json") When("count words") val wordCounts = Words.count(dataFrame, false).toSet Then("words counted") wordCounts shouldEqual Set( ("I", 1), ("vecchi", 1), ("tromboni", 1), ("dello", 1), ("@ABCDEF", 1), ("ancora", 1), ("su", 1), ("#StarWars", 1), ("https://t.co/3xxxFxxxx1", 2), ("Ti", 1), ("odio", 1), ("caro", 1) ) } "A collection of tweets" should "be counted and ordered" in { Given("a DataFrame") val dataFrame = dataFrameReader.json("test/random.json") When("count top 1 word") val wordCounts = Words.count(dataFrame, false, 1) Then("the top 1 word") wordCounts shouldEqual Array( ("https://t.co/3xxxFxxxx1", 2) ) } }
Frugghi/TweetSpark
tweet-spark-scala/src/main/scala/com/tommasomadonia/spark/TwitterAnalyzer.scala
package com.tommasomadonia.spark import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.spark.sql.SQLContext import org.apache.spark.{SparkContext, SparkConf} object TwitterAnalyzer { def main(args: Array[String]) { if (args.length < 1) { System.err.println("Usage: " + this.getClass.getSimpleName + " <path>") System.exit(1) } // Validating the parameters val hadoopConfiguration = new Configuration() val path = new Path("hdfs://" + args(0)) val fileSystem = FileSystem.get(hadoopConfiguration) if (!fileSystem.exists(path)) { System.err.println("Path '" + args(0) + "' does not exists") System.exit(1) } // Initializing Spark context val sparkConfiguration = new SparkConf().setAppName(this.getClass.getSimpleName) val sparkContext = new SparkContext(sparkConfiguration) val sqlContext = new SQLContext(sparkContext) // Useful stuff def printlnTitle(title: String) = println("------ " + title + " ------") def measureTime[T](function: => T) = { val startTime = System.nanoTime val result = function println("Execution time: " + (System.nanoTime - startTime)/1e9 + " sec") result } // Initializing Tweets table val tweetsTable = "Tweets" val dataFrame = sqlContext.read.json(path.toString) dataFrame.registerTempTable(tweetsTable) printlnTitle("Tweet table schema") dataFrame.printSchema() dataFrame.cache() // Find more active tweeters { val limit = 20 printlnTitle(s"Top $limit active tweeters") measureTime { ActiveTweeters.find(limit, sqlContext, tweetsTable).show(limit, false) } } // Find more tweeted words { val limit = 20 printlnTitle(s"Top $limit tweeted words") measureTime { Words.countDF(dataFrame, false).show(limit, false) } } // Find more tweeted hashtags { val limit = 20 printlnTitle(s"Top $limit tweeted hashtags") measureTime { Words.countHashtagsDF(dataFrame, false).show(limit, false) } } // Find more tweeted words and authors { val authorLimit = 5 val wordLimit = 20 printlnTitle(s"Top $wordLimit words and top $authorLimit authors") measureTime { Words.countPerAuthor(dataFrame, false, authorLimit).take(wordLimit) }.foreach({ case ((word, count), list) => println(s"$word (tweeted $count times):") list.foreach({ case (author, count) => println(s"- $author: $count") }) }) } // Find more tweeted words in time { val limit = 20 val hours = 6 printlnTitle(s"Top $limit tweeted words/" + hours + "h") measureTime { Words.countInTime(dataFrame, false, hours, limit).collect }.foreach({ case ((timeSlice, count), list) => println(s"$timeSlice, $count tweets:") list.foreach(println) }) } sparkContext.stop() } }
Frugghi/TweetSpark
statsd-jvm-profiler/example/StatsDProfilerFlowListener.scala
package com.etsy.cascading.flow import java.util.Properties import cascading.flow.{Flow, FlowListener, FlowStep} import org.apache.hadoop.mapred.JobConf import scala.collection.JavaConversions._ /** * Flow listener for setting up JobConf to enable statsd-jvm-profiler */ class StatsDProfilerFlowListener extends FlowListener { val baseParamsFormat = "-javaagent:%s=server=%s,port=%s,prefix=bigdata.profiler.%s.%s.%s.%%s.%%s,packageWhitelist=%s,packageBlacklist=%s,username=%s,password=%s,database=%s,reporter=%s,tagMapping=%s" override def onStarting(flow: Flow[_]): Unit = { val profilerProps = loadProperties("statsd-jvm-profiler.properties") val jarPath = profilerProps.getProperty("jar.location") val host = profilerProps.getProperty("host") val port = profilerProps.getProperty("port") val userName = System.getProperty("user.name") val flowId = flow.getID val jobName = flow.getName.replace(".", "-") val reporter = profilerProps.getProperty("reporter") val packageBlacklist = profilerProps.getProperty("package.blacklist") val packageWhiteList = profilerProps.getProperty("package.whitelist") val influxdbUser = profilerProps.getProperty("influxdb.user") val influxdbPassword = profilerProps.getProperty("influxdb.password") val influxdbDatabase = profilerProps.getProperty("influxdb.database") val dashboardUrl = profilerProps.getProperty("dashboard.url") val tagMapping = profilerProps.getProperty("tagMapping") val baseParams = baseParamsFormat.format(jarPath, host, port, userName, jobName, flowId, packageWhiteList, packageBlacklist, influxdbUser, influxdbPassword, influxdbDatabase, reporter, tagMapping) flow.getFlowSteps.toList foreach { fs: FlowStep[_] => val stepNum = fs.getStepNum.toString val conf = fs.getConfig.asInstanceOf[JobConf] val numReduceTasks = conf.get("mapreduce.job.reduces") conf.setBoolean("mapreduce.task.profile", true) conf.set("mapreduce.task.profile.map.params", baseParams.format(stepNum, "map")) conf.set("mapreduce.task.profile.reduce.params", baseParams.format(stepNum, "reduce")) // In newer versions of Cascading/Scalding it seems to no longer be possible to retrieve the correct // number of map or reduce tasks from the flow. 
// As such we have to profile a predetermined task every time, rather than picking a random one conf.set("mapreduce.task.profile.maps", getTaskToProfile(stepNum, "map", conf)) conf.set("mapreduce.task.profile.reduces", getTaskToProfile(stepNum, "reduce", conf)) // If you use https://github.com/etsy/Sahale this will cause links to the profiler dashboard to appear in Sahale // for jobs that are profiled val additionalLinksOrig = conf.get("sahale.additional.links", "") val additionalLinks = numReduceTasks.toInt match { case i: Int if i <= 0 => "%s;Profiler Dashboard - Map|%s".format(additionalLinksOrig, dashboardUrl.format("map")) case _ => "%s;Profiler Dashboard - Map|%s;Profiler Dashboard - Reduce|%s".format(additionalLinksOrig, dashboardUrl.format("map"), dashboardUrl.format("reduce")) } conf.set("sahale.additional.links", additionalLinks) } } private def getTaskToProfile(stage: String, phase: String, conf: JobConf): String = { val prop = "profiler.stage%s.%s".format(stage, phase) conf.get(prop, "0") } private def loadProperties(resourceName: String): Properties = { val props = new Properties() props.load(Thread.currentThread.getContextClassLoader.getResourceAsStream(resourceName)) props } override def onThrowable(flow: Flow[_], t: Throwable): Boolean = false override def onStopping(flow: Flow[_]): Unit = () override def onCompleted(flow: Flow[_]): Unit = () }
Frugghi/TweetSpark
tweet-spark-scala/src/test/scala/com/tommasomadonia/spark/test/SparkSpec.scala
package com.tommasomadonia.spark.test import org.apache.log4j.{Level, Logger} import org.apache.spark._ import org.scalatest._ trait SparkSpec extends BeforeAndAfterAll { this: Suite => private val master = "local[2]" private val appName = this.getClass.getSimpleName private var _sparkContext: SparkContext = _ def sparkContext = _sparkContext val conf: SparkConf = new SparkConf() .setMaster(master) .setAppName(appName) override def beforeAll(): Unit = { super.beforeAll() Logger.getLogger("org").setLevel(Level.ERROR) Logger.getLogger("akka").setLevel(Level.ERROR) Logger.getLogger("Remoting").setLevel(Level.ERROR) _sparkContext = new SparkContext(conf) _sparkContext.setLogLevel("ERROR") } override def afterAll(): Unit = { if (_sparkContext != null) { _sparkContext.stop() _sparkContext = null } super.afterAll() } }
Frugghi/TweetSpark
tweet-spark-scala/src/main/scala/com/tommasomadonia/spark/ActiveTweeters.scala
package com.tommasomadonia.spark import org.apache.spark.sql.{DataFrame, SQLContext} object ActiveTweeters { def find(sqlContext: SQLContext, table: String): DataFrame = { sqlContext.sql(s""" |SELECT user.screen_name, COUNT(*) AS total_count |FROM $table |WHERE user.screen_name IS NOT NULL |GROUP BY user.screen_name |ORDER BY total_count DESC""".stripMargin) } def find(limit: Int, sqlContext: SQLContext, table: String): DataFrame = { sqlContext.sql(s""" |SELECT user.screen_name, COUNT(*) AS total_count |FROM $table |WHERE user.screen_name IS NOT NULL |GROUP BY user.screen_name |ORDER BY total_count DESC |LIMIT $limit""".stripMargin) } }
Frugghi/TweetSpark
tweet-spark-scala/src/main/scala/com/tommasomadonia/spark/TweetDataFrame.scala
<filename>tweet-spark-scala/src/main/scala/com/tommasomadonia/spark/TweetDataFrame.scala package com.tommasomadonia.spark import org.apache.spark.sql.{Row, DataFrame} import org.apache.spark.sql.functions._ import scala.collection.mutable.ArrayBuffer private case class _Indices(indices: Array[Long]) package object dataframe_extension { implicit class TweetDataFrame(dataFrame: DataFrame) { def filterRetweets(filter: Boolean): DataFrame = if (filter && dataFrame.columns.contains("retweeted_status")) dataFrame.filter(col("retweeted_status").isNull) else dataFrame def filterMalformed(): DataFrame = dataFrame.filter(col("text").isNotNull).filter(col("created_at").isNotNull) def coalesceRetweets(): DataFrame = { val extractIndicesFunction: (Seq[Row] => Seq[_Indices]) = (elements: Seq[Row]) => { if (elements != null) { elements.map(row => _Indices(row.getAs[Seq[Long]]("indices").toArray)) } else { Array[_Indices]() } } val extractIndices = udf(extractIndicesFunction) if (!dataFrame.columns.contains("retweeted_status")) { dataFrame .withColumn("tweet_text", col("text")) .withColumn("hashtags", col("entities.hashtags")) .withColumn("media", extractIndices(col("entities.media"))) .withColumn("urls", col("entities.urls")) .withColumn("user_mentions", col("entities.user_mentions")) } else { dataFrame .withColumn("tweet_text", when(col("retweeted_status").isNull, col("text")).otherwise(col("retweeted_status.text"))) .withColumn("hashtags", when(col("retweeted_status").isNull, extractIndices(col("entities.hashtags"))).otherwise(extractIndices(col("retweeted_status.entities.hashtags")))) .withColumn("media", when(col("retweeted_status").isNull, extractIndices(col("entities.media"))).otherwise(extractIndices(col("retweeted_status.entities.media")))) .withColumn("urls", when(col("retweeted_status").isNull, extractIndices(col("entities.urls"))).otherwise(extractIndices(col("retweeted_status.entities.urls")))) .withColumn("user_mentions", when(col("retweeted_status").isNull, extractIndices(col("entities.user_mentions"))).otherwise(extractIndices(col("retweeted_status.entities.user_mentions")))) } } def tweetDataFrame(column: String): DataFrame = { val extractTweetFunction: ((String, Seq[Row], Seq[Row], Seq[Row], Seq[Row]) => Tweet) = (text: String, hashtags: Seq[Row], media: Seq[Row], urls: Seq[Row], user_mentions: Seq[Row]) => { val indices = ArrayBuffer.empty[(Long, Long)] if (hashtags != null) { indices ++= hashtags.map(row => row.getAs[Seq[Long]]("indices").toArray).map(index => (index(0), index(1))) } if (media != null) { indices ++= media.map(row => row.getAs[Seq[Long]]("indices").toArray).map(index => (index(0), index(1))) } if (urls != null) { indices ++= urls.map(row => row.getAs[Seq[Long]]("indices").toArray).map(index => (index(0), index(1))) } if (user_mentions != null) { indices ++= user_mentions.map(row => row.getAs[Seq[Long]]("indices").toArray).map(index => (index(0), index(1))) } Tweet(text, indices.toArray) } val extractTweet = udf(extractTweetFunction) dataFrame .coalesceRetweets() .withColumn(column, extractTweet(col("tweet_text"), col("hashtags"), col("media"), col("urls"), col("user_mentions"))) } } }
Frugghi/TweetSpark
tweet-spark-scala/src/test/scala/com/tommasomadonia/spark/test/SparkSQLSpec.scala
package com.tommasomadonia.spark.test

import org.apache.spark.sql.SQLContext
import org.scalatest.Suite

trait SparkSQLSpec extends SparkSpec {
  this: Suite =>

  private var _sqlContext: SQLContext = _
  def sqlContext = _sqlContext

  override def beforeAll(): Unit = {
    super.beforeAll()
    _sqlContext = new SQLContext(sparkContext)
  }

  override def afterAll(): Unit = {
    _sqlContext = null
    super.afterAll()
  }
}
Frugghi/TweetSpark
tweet-spark-scala/src/test/scala/com/tommasomadonia/spark/test/JSONSchemaSpec.scala
package com.tommasomadonia.spark.test import org.apache.spark.sql.DataFrameReader import org.apache.spark.sql.types.StructType import org.scalatest.Suite trait JSONSchemaSpec extends SparkSQLSpec { this: Suite => private val jsonSchema = "test/schema.json" private var _schema: StructType = _ private var _dataFrameReader: DataFrameReader = _ def schema = _schema def dataFrameReader = _dataFrameReader override def beforeAll(): Unit = { super.beforeAll() _schema = sqlContext.read.json(jsonSchema).schema _dataFrameReader = sqlContext.read.schema(_schema) } override def afterAll(): Unit = { _dataFrameReader = null _schema = null super.afterAll() } }
Frugghi/TweetSpark
tweet-spark-scala/src/main/scala/com/tommasomadonia/spark/Tweet.scala
package com.tommasomadonia.spark import scala.collection.mutable.ArrayBuffer case class Tweet(text: String, indices: Array[(Long, Long)]) { def tokenize(): TraversableOnce[String] = this match { case Tweet(tweet, indices) if (tweet == null || tweet.isEmpty) => Array[String]() case Tweet(tweet, indices) => { val sortedIndices = (if (indices != null) indices else Array[(Long, Long)]()).sortWith(_._1 > _._1) val text = new StringBuilder(tweet.replaceAll("[^\u0000-\uFFFF]", " ").replaceAll("\\n", " ")) val token = ArrayBuffer.empty[String] for (index <- sortedIndices) { // Apparently Twitter API are bugged (?) and sometimes oob indices are returned var startIndex = index._1.toInt var endIndex = index._2.toInt if (endIndex > text.length) { val delta = endIndex - text.length startIndex -= delta endIndex -= delta } var word = text.substring(startIndex, endIndex) if (word.trim != word) { val headTrimmedWord = word.replaceFirst("^\\s+", "") if (word.length != headTrimmedWord.length) { val delta = word.length - headTrimmedWord.length startIndex += delta endIndex += delta } else { val tailTrimmedWord = word.replaceFirst("\\s+$", "") val delta = word.length - tailTrimmedWord.length startIndex -= delta endIndex -= delta } word = text.substring(startIndex, endIndex).trim } token += word text.delete(startIndex, endIndex) } token ++= "(\\w[\\w']*\\w|\\w)".r.findAllIn(text.toString).toArray[String] token.filter(_.nonEmpty).filterNot(Set("\u2026").contains(_)) } } }
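A brief, hypothetical usage sketch of the Tweet.tokenize() method defined above: the entity index pairs (as Twitter reports them for hashtags, URLs and mentions) are cut out of the text first, then the remaining text is split into plain words. The example object name, text and offsets are made up for illustration.

// Hedged sketch, not part of the original repo: illustrates Tweet.tokenize().
import com.tommasomadonia.spark.Tweet

object TweetTokenizeExample {
  def main(args: Array[String]): Unit = {
    val text = "Cinema stasera #starwars https://t.co/5xxkexsxxc"
    // (start, end) offsets of the hashtag and the URL inside the text above
    val indices = Array((15L, 24L), (25L, 48L))
    // Entity spans are extracted first, then the leftover text is word-split
    Tweet(text, indices).tokenize().foreach(println)
  }
}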
Frugghi/TweetSpark
tweet-spark-scala/src/main/scala/com/tommasomadonia/spark/Words.scala
package com.tommasomadonia.spark import org.apache.spark.rdd.RDD import org.apache.spark.sql.{DataFrame, Row} import org.apache.spark.sql.functions._ import scala.collection.mutable.{ArrayBuffer, StringBuilder} import com.github.nscala_time.time.Imports._ import com.tommasomadonia.spark.dataframe_extension._ private case class Word(word: String) object Words { type WordCount = (String, Long) def countInTime(dataFrame: DataFrame, ignoreRetweets: Boolean, hours: Int): RDD[((String, Long), List[WordCount])] = dataFrame match { case dataFrame if !dataFrame.columns.contains("user") => dataFrame.sqlContext.sparkContext.emptyRDD[((String, Long), List[WordCount])] case dataFrame => { def moduloFloor(number: Int, modulo: Int) = number - (number % modulo) val timeSliceFunction: (String => String) = (timestamp: String) => { val inputFormat = DateTimeFormat.forPattern("EEE MMM dd HH:mm:ss Z yyyy").withOffsetParsed() val outputFormat = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm") val date = inputFormat.parseDateTime(timestamp) val startDate = date.withHour(moduloFloor(date.getHourOfDay, hours)).withMinuteOfHour(0) startDate.toString(outputFormat) + " - " + (startDate + hours.hours - 1.minutes).toString(outputFormat) } val timeSlice = udf(timeSliceFunction) val timeSlicedDataFrame = dataFrame .filterMalformed() .filterRetweets(ignoreRetweets) .withColumn("time_slice", timeSlice(col("created_at"))) val countDataFrame: DataFrame = timeSlicedDataFrame .groupBy("time_slice") .count() .withColumnRenamed("count", "total_tweets") timeSlicedDataFrame .tweetDataFrame("tweet") .explode(col("tweet")) { row => val indices = row.getStruct(0).getSeq[Row](1).map(index => (index.getLong(0), index.getLong(1))) Tweet(row.getStruct(0).getString(0), indices.toArray).tokenize().map(Word(_)) } .groupBy("time_slice", "word") .count() .join(countDataFrame, "time_slice") .orderBy(asc("time_slice"), desc("count")) .rdd .map(row => (row.getAs[String]("time_slice"), row.getAs[Long]("total_tweets")) -> (row.getAs[String]("word"), row.getAs[Long]("count"))) .groupByKey() .sortBy(_._1._1) .map({ case (key, wordCount) => key -> wordCount.toList.sortBy(-_._2) }) } } def countInTime(dataFrame: DataFrame, ignoreRetweets: Boolean = false, hours: Int, limit: Int): RDD[((String, Long), List[WordCount])] = { countInTime(dataFrame, ignoreRetweets, hours).map({ case (key, wordCount) => key -> wordCount.take(limit) }) } def countDF(dataFrame: DataFrame, ignoreRetweets: Boolean): DataFrame = dataFrame match { case dataFrame if !dataFrame.columns.contains("user") => dataFrame.sqlContext.emptyDataFrame case dataFrame => { dataFrame .filterMalformed() .filterRetweets(ignoreRetweets) .tweetDataFrame("tweet") .explode(col("tweet")) { row => val indices = row.getStruct(0).getSeq[Row](1).map(index => (index.getLong(0), index.getLong(1))) Tweet(row.getStruct(0).getString(0), indices.toArray).tokenize().map(Word(_)) } .groupBy("word") .count() .orderBy(desc("count")) } } def count(dataFrame: DataFrame, ignoreRetweets: Boolean): Seq[WordCount] = { countDF(dataFrame, ignoreRetweets).collect().map(row => (row.getAs("word"), row.getAs("count"))) } def count(dataFrame: DataFrame, ignoreRetweets: Boolean, limit: Int): Seq[WordCount] = { countDF(dataFrame, ignoreRetweets).take(limit).map(row => (row.getAs("word"), row.getAs("count"))) } def countPerAuthor(dataFrame: DataFrame, ignoreRetweets: Boolean): RDD[((String, Long), List[WordCount])] = dataFrame match { case dataFrame if !dataFrame.columns.contains("user") => 
dataFrame.sqlContext.sparkContext.emptyRDD[((String, Long), List[WordCount])] case dataFrame => { dataFrame .filterMalformed() .filterRetweets(ignoreRetweets) .tweetDataFrame("tweet") .explode(col("tweet")) { row => val indices = row.getStruct(0).getSeq[Row](1).map(index => (index.getLong(0), index.getLong(1))) Tweet(row.getStruct(0).getString(0), indices.toArray).tokenize().map(Word(_)) } .groupBy("word", "user.screen_name") .count() .rdd .map(row => row.getAs[String]("word") -> (row.getAs[String]("screen_name"), row.getAs[Long]("count"))) .groupByKey() .map({ case (key, list) => (key, list.map(_._2).reduce(_ + _)) -> (list.toList.sortBy(-_._2)) }) .sortBy(-_._1._2) } } def countPerAuthor(dataFrame: DataFrame, ignoreRetweets: Boolean = false, limitAuthor: Int): RDD[((String, Long), List[WordCount])] = { countPerAuthor(dataFrame, ignoreRetweets).map({ case (key, authorCount) => key -> authorCount.take(limitAuthor) }) } def countHashtagsDF(dataFrame: DataFrame, ignoreRetweets: Boolean): DataFrame = dataFrame match { case dataFrame if !dataFrame.columns.contains("user") => dataFrame.sqlContext.emptyDataFrame case dataFrame => { dataFrame .filterMalformed() .filterRetweets(ignoreRetweets) .tweetDataFrame("tweet") .explode(col("tweet")) { row => val indices = row.getStruct(0).getSeq[Row](1).map(index => (index.getLong(0), index.getLong(1))) Tweet(row.getStruct(0).getString(0), indices.toArray).tokenize().map(Word(_)) } .filter(col("word").startsWith("#")) .groupBy("word") .count() .orderBy(desc("count")) } } def countHashtags(dataFrame: DataFrame, ignoreRetweets: Boolean): Seq[WordCount] = { countHashtagsDF(dataFrame, ignoreRetweets).collect().map(row => (row.getAs("word"), row.getAs("count"))) } def countHashtags(dataFrame: DataFrame, ignoreRetweets: Boolean, limit: Int): Seq[WordCount] = { countHashtagsDF(dataFrame, ignoreRetweets).take(limit).map(row => (row.getAs("word"), row.getAs("count"))) } }
skygoo/octo
src/main/scala/org/seekloud/octo/ptcl/IceProtocol.scala
package org.seekloud.octo.ptcl /** * Created by sky * Date on 2019/8/16 * Time at 17:48 */ object IceProtocol { case class CandidateInfo( candidate: String, sdpMid: String, sdpMLineIndex: Int ) }
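A hedged sketch of the shape a CandidateInfo message is expected to take: the candidate string is the raw "candidate:..." attribute a browser reports, while sdpMid and sdpMLineIndex identify the media section. The concrete values and the example object name below are made up for illustration.

// Hedged sketch, not part of the original repo: building a CandidateInfo by hand.
import org.seekloud.octo.ptcl.IceProtocol.CandidateInfo

object CandidateInfoExample {
  def main(args: Array[String]): Unit = {
    val info = CandidateInfo(
      candidate = "candidate:0 1 udp 2122260223 192.168.1.10 54321 typ host generation 0", // made-up value
      sdpMid = "sdparta_0",   // made-up media section id
      sdpMLineIndex = 0
    )
    println(info)
  }
}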
skygoo/octo
src/main/scala/org/seekloud/octo/http/SessionBase.scala
package org.seekloud.octo.http import akka.http.scaladsl.model._ import akka.http.scaladsl.server import akka.http.scaladsl.server.Directives.{complete, extractRequestContext, onComplete, redirect, reject} import akka.http.scaladsl.server._ import akka.http.scaladsl.server.directives.BasicDirectives import org.seekloud.octo.common.AppSettings import org.seekloud.octo.ptcl.{CommonErrorCode, ErrorRsp} import org.seekloud.octo.utils.{CirceSupport, SessionSupport} import org.slf4j.LoggerFactory /** * User: Taoz * Date: 12/4/2016 * Time: 7:57 PM */ object SessionBase { private val logger = LoggerFactory.getLogger(this.getClass) private val sessionTimeout = 24 * 60 * 60 * 1000 val SessionTypeKey = "STKey" object AdminSessionKey { val SESSION_TYPE = "medusa_adminSession" val aid = "medusa_aid" val name = "medusa_name" val loginTime = "medusa_loginTime" } object UserSessionKey { val SESSION_TYPE = "userSession" val playerId = "playerId" val playerName = "playerName" val timestamp = "timestamp" } case class AdminInfo ( aid: String, //username name: String //password ) case class AdminSession( adminInfo: AdminInfo, time: Long ){ def toAdminSessionMap = { Map( SessionTypeKey -> AdminSessionKey.SESSION_TYPE, AdminSessionKey.aid -> adminInfo.aid, AdminSessionKey.name -> adminInfo.name, AdminSessionKey.loginTime -> time.toString ) } } case class UserSession( playerId: String, playerName: String, timestamp: String ) { def toSessionMap = Map( SessionTypeKey -> UserSessionKey.SESSION_TYPE, UserSessionKey.playerId -> playerId, UserSessionKey.playerName -> playerName, UserSessionKey.timestamp -> timestamp ) } implicit class SessionTransformer(sessionMap: Map[String, String]) { def toAdminSession: Option[AdminSession] = { logger.debug(s"toAdminSession: change map to session, ${sessionMap.mkString(",")}") try{ if(sessionMap.get(SessionTypeKey).exists(_.equals(AdminSessionKey.SESSION_TYPE))){ if(sessionMap(AdminSessionKey.loginTime).toLong - System.currentTimeMillis() > sessionTimeout){ None } else{ Some(AdminSession( AdminInfo(sessionMap(AdminSessionKey.aid), sessionMap(AdminSessionKey.name) ), sessionMap(AdminSessionKey.loginTime).toLong )) } } else{ logger.debug("no session type in the session") None } } catch { case e: Exception => e.printStackTrace() logger.warn(s"toAdminSession: ${e.getMessage}") None } } def toUserSession: Option[UserSession] = { logger.debug(s"toUserSession: change map to session, ${sessionMap.mkString(",")}") try { if (sessionMap.get(SessionTypeKey).exists(_.equals(UserSessionKey.SESSION_TYPE))) { Some(UserSession( sessionMap(UserSessionKey.playerId), sessionMap(UserSessionKey.playerName), sessionMap(UserSessionKey.timestamp) )) } else { logger.debug("no session type in the session") None } } catch { case e: Exception => logger.warn(s"toUserSession: ${e.getMessage}") None } } } } trait SessionBase extends CirceSupport with SessionSupport { import SessionBase._ import io.circe.generic.auto._ override val sessionEncoder = SessionSupport.PlaySessionEncoder override val sessionConfig = AppSettings.sessionConfig // def noSessionError(message:String = "no session") = ErrorRsp(1000102,s"$message") protected def setUserSession(userSession: UserSession): Directive0 = setSession(userSession.toSessionMap) def authUser(f: UserSession => server.Route) = optionalUserSession { case Some(session) => f(session) case None => complete(CommonErrorCode.noSessionError()) } protected val optionalAdminSession: Directive1[Option[AdminSession]] = optionalSession.flatMap{ case Right(sessionMap) => 
BasicDirectives.provide(sessionMap.toAdminSession) case Left(error) => logger.debug(error) BasicDirectives.provide(None) } protected val optionalUserSession: Directive1[Option[UserSession]] = optionalSession.flatMap { case Right(sessionMap) => BasicDirectives.provide(sessionMap.toUserSession) case Left(error) => logger.debug(error) BasicDirectives.provide(None) } def noSessionError(message:String = "no session") = ErrorRsp(1000102,s"$message") def loggingAction: Directive[Tuple1[RequestContext]] = extractRequestContext.map { ctx => // log.info(s"Access uri: ${ctx.request.uri} from ip ${ctx.request.uri.authority.host.address}.") ctx } }
skygoo/octo
src/main/scala/org/seekloud/octo/http/HttpService.scala
package org.seekloud.octo.http import akka.actor.{ActorSystem, Scheduler} import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import akka.stream.Materializer import akka.util.Timeout import akka.actor.typed.scaladsl.AskPattern._ import scala.concurrent.Future import scala.concurrent.ExecutionContextExecutor import io.circe._ import io.circe.syntax._ import io.circe.generic.auto._ /** * User: Taoz * Date: 8/26/2016 * Time: 10:27 PM */ trait HttpService extends ResourceService with ServiceUtils with SocketService { implicit val system: ActorSystem implicit val executor: ExecutionContextExecutor implicit val materializer: Materializer implicit val timeout: Timeout implicit val scheduler: Scheduler lazy val httpRoutes: Route = ignoreTrailingSlash { pathPrefix("octo") { pathPrefix("index") { pathEndOrSingleSlash { getFromResource("html/index.html") } } ~ resourceRoutes } } lazy val httpsRoutes: Route = ignoreTrailingSlash { pathPrefix("octo") { pathPrefix("index") { pathEndOrSingleSlash { getFromResource("html/test.html") } } ~ resourceRoutes ~ joinRoute } } }
skygoo/octo
src/main/scala/org/seekloud/octo/common/AppSettings.scala
// Copyright 2018 seekloud (https://github.com/seekloud) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package org.seekloud.octo.common import java.util.concurrent.TimeUnit import com.typesafe.config.{Config, ConfigFactory} import org.seekloud.octo.utils.SessionSupport.SessionConfig import org.slf4j.LoggerFactory import collection.JavaConverters._ /** * User: Taoz * Date: 9/4/2015 * Time: 4:29 PM */ object AppSettings { private implicit class RichConfig(config: Config) { val noneValue = "none" def getOptionalString(path: String): Option[String] = if (config.getAnyRef(path) == noneValue) None else Some(config.getString(path)) def getOptionalLong(path: String): Option[Long] = if (config.getAnyRef(path) == noneValue) None else Some(config.getLong(path)) def getOptionalDurationSeconds(path: String): Option[Long] = if (config.getAnyRef(path) == noneValue) None else Some(config.getDuration(path, TimeUnit.SECONDS)) } val log = LoggerFactory.getLogger(this.getClass) val config = ConfigFactory.parseResources("product.conf").withFallback(ConfigFactory.load()) val appConfig = config.getConfig("app") val httpInterface = appConfig.getString("http.interface") val httpPort = appConfig.getInt("http.port") val sessionConfig = { val sConf = config.getConfig("session") SessionConfig( cookieName = sConf.getString("cookie.name"), serverSecret = sConf.getString("serverSecret"), domain = sConf.getOptionalString("cookie.domain"), path = sConf.getOptionalString("cookie.path"), secure = sConf.getBoolean("cookie.secure"), httpOnly = sConf.getBoolean("cookie.httpOnly"), maxAge = sConf.getOptionalDurationSeconds("cookie.maxAge"), sessionEncryptData = sConf.getBoolean("encryptData") ) } val appSecureMap = { val appIds = appConfig.getStringList("client.appIds").asScala val secureKeys = appConfig.getStringList("client.secureKeys").asScala require(appIds.length == secureKeys.length, "appIdList.length and secureKeys.length not equel.") appIds.zip(secureKeys).toMap } val guestIdPrefix = "guest" val userIdPrefix = "user" val tlsInfo = (appConfig.getString("tls.password"),appConfig.getString("tls.p12Path")) }
skygoo/octo
src/main/scala/org/seekloud/octo/bridge/DtlsHandler.scala
<gh_stars>0 package org.seekloud.octo.bridge import java.io.IOException import javax.sdp.{MediaDescription, SdpException, SdpParseException, SessionDescription} import org.ice4j.ice.{CandidatePair, Component, IceMediaStream, IceProcessingState} import org.seekloud.octo.bridge.dtls.mock.Connect import org.seekloud.octo.ptcl.WebSocketSession import scala.collection.mutable import collection.JavaConverters._ /** * Created by sky * Date on 2019/8/16 * Time at 13:25 * todo implement dtls protocol * todo exchange srtp password */ object DtlsHandler { case object MediaType { val VIDEO = "video" val AUDIO = "audio" } private val handlers: mutable.HashMap[String, DtlsHandler] = mutable.HashMap.empty def addHandler(id:String,h:DtlsHandler) = handlers.put(id,h) def removeHandlers(id: String) = handlers.remove(id).foreach(_.close()) } class DtlsHandler( override val session: WebSocketSession ) extends IceHandler(session) { protected var rtcpMux = false private val connect = new Connect() protected def getLocalFingerPrint: String = connect.getLocalFingerPrint protected def notifyRemoteFingerprint(mediaType: String, remoteFingerprint: String): Unit = { log.info(s"$logPrefix remoteFingerprint $remoteFingerprint") } override def prepareAnswer(offerSdp: SessionDescription, answerSdp: SessionDescription): SessionDescription = { super.prepareAnswer(offerSdp, answerSdp) try { answerSdp.setAttribute("fingerprint", getLocalFingerPrint) val globalFingerPrint = offerSdp.getAttribute("fingerprint") if (globalFingerPrint != null) notifyRemoteFingerprint(null, globalFingerPrint) else { val mids = new mutable.HashMap[String, mutable.HashMap[String, String]] offerSdp.getMediaDescriptions(false).asScala.map(_.asInstanceOf[MediaDescription]).foreach(md => { try { mids.put(md.getMedia.getMediaType, new mutable.HashMap[String, String]) mids(md.getMedia.getMediaType).put("mid", md.getAttribute("mid")) mids(md.getMedia.getMediaType).put("msid", md.getAttribute("msid")) mids(md.getMedia.getMediaType).put("ssrc", md.getAttribute("ssrc")) val fingerPrint = md.getAttribute("fingerprint") notifyRemoteFingerprint(md.getMedia.getMediaType, fingerPrint) } catch { case e: SdpParseException => throw new RuntimeException(e) } }) answerSdp.getMediaDescriptions(false).asScala.map(_.asInstanceOf[MediaDescription]).foreach(md => { try { md.setAttribute("mid", mids(md.getMedia.getMediaType)("mid")) md.setAttribute("msid", mids(md.getMedia.getMediaType)("msid")) md.setAttribute("ssrc", mids(md.getMedia.getMediaType)("ssrc")) if ("audio".equalsIgnoreCase(md.getMedia.getMediaType)) md.setAttribute("fingerprint", getLocalFingerPrint) else if ("video".equalsIgnoreCase(md.getMedia.getMediaType)) answerSdp.setAttribute("fingerprint", getLocalFingerPrint) } catch { case e: SdpException => throw new RuntimeException(e) } }) } } catch { case e: SdpException => throw new RuntimeException(e) } answerSdp } @throws[IOException] protected def doOpenMediaStream(mediaType: String, rtpPair: CandidatePair, rtcpPair: CandidatePair, rtcpmux: Boolean) = if (rtpPair != null) { //client.connect(rtpPair.getDatagramSocket()); println("fsfashfiashf",rtcpPair.getDatagramSocket) connect.connect(rtpPair.getDatagramSocket) } def openStream(mediaType: String): Unit = { //fixme 此处需要修改为iceMediaStream.getComponent(Component.RTP).getSocket try { val iceMediaStreamOpt = getICEMediaStream(mediaType) iceMediaStreamOpt match { case Some(iceMediaStream) => val rtp = iceMediaStream.getComponent(Component.RTP) val rtpPair = rtp.getSelectedPair var rtcpPair: CandidatePair = null if 
(!rtcpMux) {
            // RTCP runs on its own component when rtcp-mux is not negotiated
            val rtcp = iceMediaStream.getComponent(Component.RTCP)
            rtcpPair = rtcp.getSelectedPair
          }
          doOpenMediaStream(mediaType, rtpPair, rtcpPair, rtcpMux)
        case None => println("ICE media stream not initialized")
      }
    } catch {
      case e@(_: InterruptedException | _: IOException) => throw new RuntimeException(e)
    }
  }
}

skygoo/octo
build.sbt
name := "octo"

version := "0.1"

scalaVersion := "2.12.8"

//resolvers += Resolver.mavenLocal
resolvers += "Jitsi ORG Snapshots" at "https://github.com/jitsi/jitsi-maven-repository/raw/master/snapshots/"
//resolvers += "Jitsi ORG Releases" at "https://github.com/jitsi/jitsi-maven-repository/raw/master/releases/"

val projectMainClass = "org.seekloud.octo.Boot"

lazy val root = (project in file("."))
  .settings(
    mainClass in reStart := Some(projectMainClass),
    javaOptions in reStart += "-Xmx2g"
  )
  .settings(
    name := "octo"
  )
  .settings(
    //pack
    // If you need to specify main classes manually, use packSettings and packMain
    //packSettings,
    // [Optional] Creating `hello` command that calls org.mydomain.Hello#main(Array[String])
    packMain := Map("octo" -> projectMainClass),
    packJvmOpts := Map("octo" -> Seq("-Xmx512m", "-Xms128m", "-XX:+HeapDumpOnOutOfMemoryError")),
    packExtraClasspath := Map("octo" -> Seq("."))
  )
  .settings(
    libraryDependencies ++= Dependencies.backendDependencies,
    libraryDependencies ++= Dependencies.testLibs,
    libraryDependencies ++= Dependencies.jitsiLibs,
    libraryDependencies ++= Dependencies.bcpLibs
  )
skygoo/octo
src/main/scala/org/seekloud/octo/ptcl/BrowserMsg.scala
package org.seekloud.octo.ptcl

import org.seekloud.octo.ptcl.IceProtocol.CandidateInfo

/**
  * Created by sky
  * Date on 2019/8/16
  * Time at 16:56
  */
object BrowserMsg {

  object MsgId {
    val PING = "PING" // sent periodically by the front end
    val PONG = "PONG" // reply from the backend
    val Anchor_SDP_OFFER = "Anchor_SDP_OFFER" // the anchor (broadcaster) joins
    val Audience_SDP_OFFER = "Audience_SDP_OFFER" // sent after the CONNECT message
    val PROCESS_SDP_ANSWER = "PROCESS_SDP_ANSWER" // handled automatically
    val ADD_ICE_CANDIDATE = "ADD_ICE_CANDIDATE" // handled automatically
    val CONNECT = "CONNECT" // establish the connection
    val DISCONNECT = "DISCONNECT" // disconnect
  }

  trait WsMsg

  case object Complete extends WsMsg

  case class Fail(ex: Throwable) extends WsMsg

  sealed trait WsJsonMsg extends WsMsg

  case class Test(
                   test: String
                 ) extends WsJsonMsg

  case class AnchorSdpOffer(
                             sdpOffer: String
                           ) extends WsJsonMsg

  case class AddIceCandidate(
                              candidateInfo: CandidateInfo
                            ) extends WsJsonMsg

  case class ProcessSdpAnswer(
                               sdpAnswer: String
                             ) extends WsJsonMsg
}
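A hedged round-trip sketch of the JSON shape circe's automatic derivation gives these messages (the same codecs EndPointManager uses for the WebSocket flow). The example object name and SDP text are placeholders, and the printed JSON layout is only indicative.

// Hedged sketch, not part of the original repo: encode/decode a WsJsonMsg with circe.
import io.circe.generic.auto._
import io.circe.parser.decode
import io.circe.syntax._
import org.seekloud.octo.ptcl.BrowserMsg

object BrowserMsgJsonExample {
  def main(args: Array[String]): Unit = {
    val msg: BrowserMsg.WsJsonMsg = BrowserMsg.AnchorSdpOffer("v=0 ...") // placeholder SDP
    val json = msg.asJson.noSpaces
    println(json) // e.g. {"AnchorSdpOffer":{"sdpOffer":"v=0 ..."}}
    println(decode[BrowserMsg.WsJsonMsg](json)) // Right(AnchorSdpOffer(...)) on success
  }
}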
skygoo/octo
src/main/scala/org/seekloud/octo/bridge/IceHandler.scala
<filename>src/main/scala/org/seekloud/octo/bridge/IceHandler.scala<gh_stars>0 package org.seekloud.octo.bridge import java.beans.{PropertyChangeEvent, PropertyChangeListener} import java.io.IOException import javax.sdp.{MediaDescription, SdpException, SessionDescription} import org.ice4j.{Transport, TransportAddress} import org.ice4j.ice.{Agent, CandidatePair, CandidateType, Component, IceMediaStream, IceProcessingState, RemoteCandidate} import org.ice4j.ice.harvest.{StunCandidateHarvester, TurnCandidateHarvest, TurnCandidateHarvester} import org.ice4j.security.LongTermCredential import org.seekloud.octo.ptcl.IceProtocol.CandidateInfo import org.seekloud.octo.ptcl.{BrowserMsg, WebSocketSession} import org.slf4j.LoggerFactory import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import collection.JavaConverters._ /** * Created by sky * Date on 2019/8/15 * Time at 17:06 * use for ice and save info */ object IceHandler { case class IceStreamInfo( mid: String, mIndex: Int ) val stunInfo = new StunCandidateHarvester(new TransportAddress("172.16.58.3", 41640, Transport.UDP)) val turnCredential = new LongTermCredential("hu", "123456") val turnInfo = new TurnCandidateHarvester(new TransportAddress("172.16.58.3", 41640, Transport.UDP), turnCredential) } class IceHandler( val session: WebSocketSession ) { protected val log = LoggerFactory.getLogger(this.getClass) protected var logPrefix: String = session.id + " |" import IceHandler._ private val iceAgent = new Agent private var iceAgentStateIsRunning: Boolean = IceProcessingState.RUNNING == iceAgent.getState private val iceMediaStreamMap: mutable.HashMap[IceStreamInfo, IceMediaStream] = mutable.HashMap.empty iceAgent.addCandidateHarvester(stunInfo) iceAgent.addCandidateHarvester(turnInfo) iceAgent.setControlling(false) iceAgent.addStateChangeListener(new PropertyChangeListener() { override def propertyChange(evt: PropertyChangeEvent): Unit = { val oldState = evt.getOldValue.asInstanceOf[IceProcessingState] val newState = evt.getNewValue.asInstanceOf[IceProcessingState] log.info(logPrefix + s"change state from ${oldState.toString} to ${newState.toString}") iceAgentStateIsRunning = IceProcessingState.RUNNING == newState } }) protected def getIceState() = iceAgent.getState protected def close() = iceMediaStreamMap.foreach(stream => iceAgent.removeStream(stream._2)) def getICEMediaStream(mediaType: String): Option[IceMediaStream] = iceMediaStreamMap.find(_._1.mid == mediaType).map(_._2) def addStream(mediaType: String, mid: Int, rtcpmux: Boolean): Unit = { val mediaStream = iceAgent.createMediaStream(mediaType + session.id) // mediaStream.addPairChangeListener(new ICEManager#ICEHandler#PairChangeListener) iceMediaStreamMap.put(IceStreamInfo(mediaType, mid), mediaStream) //For each Stream create two components (RTP & RTCP) try { val rtp = iceAgent.createComponent(mediaStream, Transport.UDP, 10000, 10000, 11000) if (!rtcpmux) { val rtcp = iceAgent.createComponent(mediaStream, Transport.UDP, 10001, 10001, 11000) } } catch { case e@(_: IllegalArgumentException | _: IOException) => // TODO Auto-generated catch block e.printStackTrace() } } private def getLocalCandidates = { val localCandidates = new ArrayBuffer[CandidateInfo] iceMediaStreamMap.foreach { stream => stream._2.getComponents.forEach(cmp => cmp.getLocalCandidates.forEach(lc => localCandidates.append(CandidateInfo(lc.toString, stream._1.mid, stream._1.mIndex)) ) ) } localCandidates.toList } private var remoteUfrag: String = null private var remotePassword: String = null def 
setupFragPasswd(remoteUfrag: String, remotePassword: String): Unit = { this.remoteUfrag = remoteUfrag this.remotePassword = remotePassword } def startConnectivityEstablishment(): Unit = { iceMediaStreamMap.foreach { s => s._2.setRemoteUfrag(this.remoteUfrag) s._2.setRemotePassword(this.remotePassword) } try { getLocalCandidates.foreach(lc => session.session ! BrowserMsg.AddIceCandidate(lc)) iceAgent.startConnectivityEstablishment() } catch { case e: IOException => throw new RuntimeException(e) } } def processRemoteCandidate(candidateInfo: CandidateInfo): Unit = { var tokens: Array[String] = candidateInfo.candidate.split(":") if ("candidate".equalsIgnoreCase(tokens(0))) { // val stream: IceMediaStream = iceMediaStreamMap.find(_._1.mIndex == candidateInfo.sdpMLineIndex).get._2 iceMediaStreamMap.find(_._1.mIndex == candidateInfo.sdpMLineIndex) match { case Some(stream) => println("find--stream") tokens = tokens(1).split(" ") var i: Int = 0 val foundation: String = tokens({ i += 1 i - 1 }).trim val cmpId: Int = tokens({ i += 1 i - 1 }).trim.toInt val parentComponent: Component = stream._2.getComponent(cmpId) if (parentComponent != null) { val transport: Transport = Transport.parse(tokens({ i += 1 i - 1 }).trim.toLowerCase) val priority: Long = tokens({ i += 1 i - 1 }).trim.toLong val hostaddress: String = tokens({ i += 1 i - 1 }).trim val port: Int = tokens({ i += 1 i - 1 }).trim.toInt val transportAddress: TransportAddress = new TransportAddress(hostaddress, port, transport) var `type`: CandidateType = null if ("typ".equalsIgnoreCase(tokens(i).trim)) `type` = CandidateType.parse(tokens({ i += 1 i }).trim.toLowerCase) if (tokens.length > i && "generation" == tokens(i)) { val generation: Int = tokens({ i += 1 i }).trim.toInt i += 1 } var related: RemoteCandidate = null var rAddr: String = null if (tokens.length > i && "raddr".equalsIgnoreCase(tokens(i))) { rAddr = tokens({ i += 1 i }).trim i += 1 } var rport: Int = -1 if (tokens.length > i && "rport".equalsIgnoreCase(tokens(i))) { rport = tokens({ i += 1 i }).trim.toInt i += 1 } if (rAddr != null) { val rAddress: TransportAddress = new TransportAddress(rAddr, rport, transport) related = new RemoteCandidate(rAddress, parentComponent, `type`, foundation, priority, null) } val rc: RemoteCandidate = new RemoteCandidate(transportAddress, parentComponent, `type`, foundation, priority, related) if (iceAgentStateIsRunning) { parentComponent.addUpdateRemoteCandidates(rc) } else { parentComponent.addRemoteCandidate(rc) } } case None => println("none--stream") } } else throw new IllegalArgumentException("Does not start with candidate:") } def prepareAnswer(offerSdp: SessionDescription, answerSdp: SessionDescription): SessionDescription = { try { val localDescriptions = answerSdp.getMediaDescriptions(false).asScala println(localDescriptions.size) localDescriptions.map(_.asInstanceOf[MediaDescription]).foreach(md => try { if ("audio" == md.getMedia.getMediaType) { //md.setAttribute("mid", audiomediaStream.getName()); } else if ("video" == md.getMedia.getMediaType) { //md.setAttribute("mid", videomediaStream.getName()); } md.setAttribute("ice-ufrag", iceAgent.getLocalUfrag) md.setAttribute("ice-pwd", iceAgent.getLocalPassword) } catch { case e: SdpException => throw new RuntimeException(e) } ) } catch { case e: SdpException => throw new RuntimeException(e) } answerSdp } }
skygoo/octo
src/main/scala/org/seekloud/octo/ptcl/EpInfo.scala
package org.seekloud.octo.ptcl /** * Created by sky * Date on 2019/8/18 * Time at 14:24 * distinguish endpoint webSocketSession with info */ case class EpInfo(id:String)
skygoo/octo
src/main/scala/org/seekloud/octo/core/EndPointManager.scala
<reponame>skygoo/octo<filename>src/main/scala/org/seekloud/octo/core/EndPointManager.scala package org.seekloud.octo.core import akka.actor.typed.{ActorRef, Behavior} import akka.actor.typed.scaladsl.{ActorContext, Behaviors, TimerScheduler} import akka.http.scaladsl.model.ws.{Message, TextMessage} import akka.stream.{ActorAttributes, Supervision} import akka.stream.scaladsl.Flow import org.slf4j.LoggerFactory import scala.concurrent.Future import org.seekloud.octo.Boot.{executor, materializer} import org.seekloud.octo.ptcl.{BrowserMsg, EpInfo} import io.circe.generic.auto._ import io.circe.parser.decode import io.circe.syntax._ /** * Created by sky * Date on 2019/8/15 * Time at 16:09 * 管理多路rtc */ object EndPointManager { private val log = LoggerFactory.getLogger(this.getClass) trait Command final case class ChildDead[U](name: String, childRef: ActorRef[U]) extends Command final case class GetWebSocketFlow(replyTo: ActorRef[Flow[Message, Message, Any]], epInfo: EpInfo) extends Command def create(): Behavior[Command] = { log.info(s"LiveManager start...") Behaviors.setup[Command] { ctx => Behaviors.withTimers[Command] { implicit timer => idle() } } } private def idle() ( implicit timer: TimerScheduler[Command] ): Behavior[Command] = { Behaviors.receive[Command] { (ctx, msg) => msg match { case msg: GetWebSocketFlow => val userActor = getLiveActor(ctx, msg.epInfo) msg.replyTo ! getWebSocketFlow(userActor) Behaviors.same case unKnow => log.error(s"${ctx.self.path} receive a unknow msg when idle:$unKnow") Behaviors.same } } } private def getWebSocketFlow(userActor: ActorRef[EndPointWorker.Command]): Flow[Message, Message, Any] = { import scala.language.implicitConversions implicit def parseS2Json(s:String):Option[BrowserMsg.WsJsonMsg] = try{ val msg = decode[BrowserMsg.WsJsonMsg](s).right.get Some(msg) }catch { case e:Exception=> log.error(s"decode $s as WsJson error with ${e.getMessage}") None } Flow[Message] .collect { case TextMessage.Strict(m) => Future.successful(EndPointWorker.WebSocketMsg(m)) case TextMessage.Streamed(sm) => sm.runFold(new StringBuilder().result()) { case (s, str) => s.++(str) }.map { s => EndPointWorker.WebSocketMsg(s) } }.mapAsync(parallelism = 3)(identity) //同时处理Strict和Stream .via(EndPointWorker.flow(userActor)) .map { case t: BrowserMsg.WsJsonMsg => TextMessage.Strict(t.asJson.noSpaces) case x => log.debug(s"akka stream receive unknown msg=$x") TextMessage.apply("") }.withAttributes(ActorAttributes.supervisionStrategy(decider)) } private val decider: Supervision.Decider = { e: Throwable => e.printStackTrace() log.error(s"WS stream failed with $e") Supervision.Resume } private def getLiveActor(ctx: ActorContext[Command],epInfo: EpInfo): ActorRef[EndPointWorker.Command] = { val childName = s"LiveActor-${epInfo.id}" ctx.child(childName).getOrElse { val actor = ctx.spawn(EndPointWorker.create(epInfo), childName) // ctx.watchWith(actor, ChildDead(childName, actor)) actor }.unsafeUpcast[EndPointWorker.Command] } }
skygoo/octo
project/Dependencies.scala
import sbt._

/**
  * User: Taoz
  * Date: 6/13/2017
  * Time: 9:38 PM
  */
object Dependencies {

  val slickV = "3.2.3"
  val akkaV = "2.5.22"
  val akkaHttpV = "10.1.8"
  val scalaXmlV = "1.1.0"
  val circeVersion = "0.9.3"
  val scalaJsDomV = "0.9.6"

  val akkaSeq = Seq(
    "com.typesafe.akka" %% "akka-actor" % akkaV withSources(),
    "com.typesafe.akka" %% "akka-actor-typed" % akkaV withSources(),
    "com.typesafe.akka" %% "akka-slf4j" % akkaV,
    "com.typesafe.akka" %% "akka-stream" % akkaV,
    "com.typesafe.akka" %% "akka-stream-typed" % akkaV
  )

  val akkaHttpSeq = Seq(
    "com.typesafe.akka" %% "akka-http" % akkaHttpV,
    "com.typesafe.akka" %% "akka-http-testkit" % akkaHttpV
  )

  val circeSeq = Seq(
    "io.circe" %% "circe-core" % circeVersion,
    "io.circe" %% "circe-generic" % circeVersion,
    "io.circe" %% "circe-parser" % circeVersion
  )

  val scalaXml = "org.scala-lang.modules" %% "scala-xml" % "1.0.6"
  val nscalaTime = "com.github.nscala-time" %% "nscala-time" % "2.16.0"
  val hikariCP = "com.zaxxer" % "HikariCP" % "2.6.2"
  val logback = "ch.qos.logback" % "logback-classic" % "1.2.3"
  val codec = "commons-codec" % "commons-codec" % "1.10"
  // val asynchttpclient = "org.asynchttpclient" % "async-http-client" % "2.0.32"
  val ehcache = "net.sf.ehcache" % "ehcache" % "2.10.4"
  // val netty = "io.netty" % "netty-all" % "4.1.36.Final"

  val backendDependencies: Seq[ModuleID] =
    Dependencies.akkaSeq ++
    Dependencies.akkaHttpSeq ++
    Dependencies.circeSeq ++
    Seq(
      Dependencies.scalaXml,
      Dependencies.nscalaTime,
      Dependencies.logback,
      Dependencies.codec,
      // Dependencies.asynchttpclient,
      Dependencies.ehcache
    )

  val testLibs = Seq(
    "com.typesafe.akka" %% "akka-testkit" % akkaV % "test",
    "com.typesafe.akka" %% "akka-actor-testkit-typed" % akkaV % "test",
    "org.scalatest" %% "scalatest" % "3.0.7" % "test"
  )

  val jitsiLibs: Seq[ModuleID] = Seq(
    "org.jitsi" % "ice4j" % "2.0.0-SNAPSHOT"
  )

  val bcpVersion = "1.60"

  val bcpLibs: Seq[ModuleID] = Seq(
    "org.bouncycastle" % "bcprov-jdk15on" % bcpVersion,
    "org.bouncycastle" % "bcpkix-jdk15on" % bcpVersion
  )
}
skygoo/octo
src/main/scala/org/seekloud/octo/core/EndPointWorker.scala
<reponame>skygoo/octo<gh_stars>0 package org.seekloud.octo.core import java.io.FileInputStream import akka.actor.typed.{ActorRef, Behavior} import akka.actor.typed.scaladsl.{Behaviors, StashBuffer, TimerScheduler} import akka.stream.OverflowStrategy import akka.stream.scaladsl.Flow import akka.stream.typed.scaladsl.{ActorSink, ActorSource} import javax.sdp.{MediaDescription, SdpParseException, SessionDescription} import org.opentelecoms.javax.sdp.NistSdpFactory import org.seekloud.octo.bridge.DtlsHandler import org.seekloud.octo.bridge.DtlsHandler.MediaType import org.seekloud.octo.ptcl.{BrowserMsg, EpInfo, WebSocketSession} import org.seekloud.octo.utils.FileUtil import org.slf4j.LoggerFactory /** * Created by sky * Date on 2019/8/15 * Time at 16:09 * 对接一路RtcPeerConnect */ object EndPointWorker { private val log = LoggerFactory.getLogger(this.getClass) trait Command final case class ChildDead[U](name: String, childRef: ActorRef[U]) extends Command case class WebSocketMsg(msg: Option[BrowserMsg.WsJsonMsg]) extends Command case object CompleteMsgFront extends Command case class FailMsgFront(ex: Throwable) extends Command case class UserFrontActor(actor: ActorRef[BrowserMsg.WsMsg]) extends Command case class UserLeft[U](actorRef: ActorRef[U]) extends Command case object Stop extends Command private def sink(actor: ActorRef[Command]) = ActorSink.actorRef[Command]( ref = actor, onCompleteMessage = CompleteMsgFront, onFailureMessage = FailMsgFront.apply ) def flow(actor: ActorRef[Command]): Flow[WebSocketMsg, BrowserMsg.WsMsg, Any] = { val in = Flow[WebSocketMsg].to(sink(actor)) val out = ActorSource.actorRef[BrowserMsg.WsMsg]( completionMatcher = { case BrowserMsg.Complete => }, failureMatcher = { case BrowserMsg.Fail(e) => e }, bufferSize = 128, overflowStrategy = OverflowStrategy.dropHead ).mapMaterializedValue(outActor => actor ! 
UserFrontActor(outActor)) Flow.fromSinkAndSource(in, out) } def create(epInfo: EpInfo): Behavior[Command] = { Behaviors.setup[Command] { ctx => log.info(s"${epInfo.id}| is starting...") implicit val stashBuffer = StashBuffer[Command](Int.MaxValue) Behaviors.withTimers[Command] { implicit timer => init4web(epInfo, s"${epInfo.id}|init|") } } } private def init4web(epInfo: EpInfo, logPrefix: String)( implicit stashBuffer: StashBuffer[Command], timer: TimerScheduler[Command] ): Behavior[Command] = Behaviors.receive[Command] { (ctx, msg) => msg match { case UserFrontActor(f) => log.info(s"$logPrefix Ws connect success") val h = new DtlsHandler(WebSocketSession(epInfo.id, ctx.self, f)) DtlsHandler.addHandler(epInfo.id, h) wait4web(epInfo, h, s"${epInfo.id}|work|") case Stop => log.info(s"$logPrefix stop") Behaviors.stopped case unKnow => stashBuffer.stash(unKnow) Behavior.same } } import collection.JavaConverters._ val answerDescription="v=0\no=mozilla...THIS_IS_SDPARTA-43.0.4 5452113973299446729 0 IN IP4 0.0.0.0\ns=-\nt=0 0\na=fingerprint:sha-256 90:7D:7B:AF:23:9B:12:24:8E:62:F2:E4:02:63:EB:3B:6C:D9:89:13:E4:5B:A2:60:44:2D:C2:59:4B:65:07:DC\na=ice-options:trickle\na=msid-semantic:WMS *\nm=video 9 UDP/TLS/RTP/SAVPF 100\nc=IN IP4 0.0.0.0\na=sendrecv\na=fmtp:100 max-f\ns=12288;max-f\na=ice-pwd:<PASSWORD>7f0\na=ice-ufrag:8fda67d3\na=mid:sdparta_0\na=rtcp-mux\na=rtcp-fb:100 nack\na=rtcp-fb:100 nack pli\na=rtcp-fb:100 ccm fir\na=rtpmap:100 VP8/90000\na=setup:active" private def wait4web(epInfo: EpInfo, handler: DtlsHandler, logPrefix: String)( implicit stashBuffer: StashBuffer[Command], timer: TimerScheduler[Command] ): Behavior[Command] = Behaviors.receive[Command] { (ctx, msg) => msg match { case msg: WebSocketMsg => msg.msg.foreach { case m: BrowserMsg.AnchorSdpOffer => try { val offerSdp = new NistSdpFactory().createSessionDescription(m.sdpOffer) val md = offerSdp.getMediaDescriptions(false).asScala.map(_.asInstanceOf[MediaDescription]).filter(_.getMedia.getMediaType == MediaType.VIDEO) md.foreach { r => val mediaType = r.getMedia.getMediaType handler.addStream(mediaType, r.getAttribute("mid").toInt, true) val remoteUfrag = r.getAttribute("ice-ufrag") val remotePasswd = r.getAttribute("ice-pwd") handler.setupFragPasswd(remoteUfrag, remotePasswd) handler.startConnectivityEstablishment() val answerSdp = new NistSdpFactory().createSessionDescription(answerDescription) handler.prepareAnswer(offerSdp, answerSdp) handler.session.session ! BrowserMsg.ProcessSdpAnswer(answerSdp.toString) handler.openStream(mediaType) } } catch { case e: SdpParseException => log.error(logPrefix + " " + e.getMessage) log.error(logPrefix + " " + m.sdpOffer) } case m: BrowserMsg.AddIceCandidate => handler.processRemoteCandidate(m.candidateInfo) } Behaviors.same case Stop => log.info(s"$logPrefix stop") Behaviors.stopped case unKnow => stashBuffer.stash(unKnow) Behavior.same } } }
skygoo/octo
src/main/scala/org/seekloud/octo/http/SocketService.scala
package org.seekloud.octo.http

import akka.http.scaladsl.model.ws.Message
import akka.http.scaladsl.server.Directives._
import org.slf4j.LoggerFactory
import akka.actor.typed.scaladsl.AskPattern._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.Flow

import scala.concurrent.Future
import org.seekloud.octo.Boot.{endPointManager, executor, scheduler, timeout}
import org.seekloud.octo.core.EndPointManager
import org.seekloud.octo.ptcl.EpInfo

/**
  * Created by sky
  * Date on 2019/6/14
  * Time at 3:52 PM
  * This file establishes the socket connection with the front end
  */
trait SocketService extends ServiceUtils {

  private val log = LoggerFactory.getLogger(this.getClass)

  private def userJoin = path("userJoin") {
    parameter(
      'userId.as[String]
    ) { userId =>
      val flowFuture: Future[Flow[Message, Message, Any]] = endPointManager ? (EndPointManager.GetWebSocketFlow(_, EpInfo(userId)))
      dealFutureResult(
        flowFuture.map(t => handleWebSocketMessages(t))
      )
    }
  }

  val joinRoute: Route = userJoin
}
skygoo/octo
src/test/scala/org/seekloud/octo/Test.scala
package org.seekloud.octo import java.beans.PropertyChangeEvent import java.beans.PropertyChangeListener import java.io.BufferedReader import java.io.InputStreamReader import java.net.DatagramSocket import java.net.SocketAddress import java.util import org.ice4j.Transport import org.ice4j.TransportAddress import org.ice4j.ice.Agent import org.ice4j.ice.Component import org.ice4j.ice.IceMediaStream import org.ice4j.ice.IceProcessingState import org.ice4j.ice.LocalCandidate import org.ice4j.ice.NominationStrategy import org.ice4j.ice.RemoteCandidate import org.ice4j.ice.harvest.StunCandidateHarvester import org.ice4j.ice.harvest.TurnCandidateHarvester import org.ice4j.security.LongTermCredential /** * Created by sky * Date on 2019/8/13 * Time at 下午7:37 */ object Test { }
skygoo/octo
src/main/scala/org/seekloud/octo/Boot.scala
<filename>src/main/scala/org/seekloud/octo/Boot.scala package org.seekloud.octo import java.io.FileInputStream import java.security.{KeyStore, SecureRandom} import akka.actor.ActorSystem import akka.actor.typed.ActorRef import akka.actor.typed.scaladsl.adapter._ import akka.dispatch.MessageDispatcher import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext} import akka.stream.{ActorMaterializer, Materializer} import akka.util.Timeout import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory} import org.seekloud.octo.common.AppSettings import org.seekloud.octo.core.EndPointManager import org.seekloud.octo.http.HttpService import scala.language.postfixOps import scala.util.{Failure, Success} /** * Author: <NAME> * Date: 4/29/2019 * Time: 11:28 PM */ object Boot extends HttpService { import concurrent.duration._ implicit val system: ActorSystem = ActorSystem("octo", AppSettings.config) override implicit val materializer: Materializer = ActorMaterializer() override implicit val scheduler = system.scheduler override implicit val timeout: Timeout = Timeout(10 seconds) val log: LoggingAdapter = Logging(system, getClass) implicit val executor: MessageDispatcher = system.dispatchers.lookup("akka.actor.my-blocking-dispatcher") val endPointManager = system.spawn(EndPointManager.create(), "EndPointManager") def main(args: Array[String]): Unit = { log.info("Starting.") val password: Array[Char] = AppSettings.tlsInfo._1.toCharArray // do not store passwords in code, read them from somewhere safe! val ks: KeyStore = KeyStore.getInstance("PKCS12") val keystore = new FileInputStream(AppSettings.tlsInfo._2) require(keystore != null, "Keystore required!") ks.load(keystore, password) val keyManagerFactory: KeyManagerFactory = KeyManagerFactory.getInstance("SunX509") keyManagerFactory.init(ks, password) val tmf: TrustManagerFactory = TrustManagerFactory.getInstance("SunX509") tmf.init(ks) val sslContext: SSLContext = SSLContext.getInstance("TLS") sslContext.init(keyManagerFactory.getKeyManagers, tmf.getTrustManagers, new SecureRandom()) val https: HttpsConnectionContext = ConnectionContext.https(sslContext) val httpsBinding = Http().bindAndHandle(httpsRoutes, AppSettings.httpInterface, AppSettings.httpPort, connectionContext = https) //remind 此处实现http请求 // val httpBinding = Http().bindAndHandle(httpRoutes, httpInterface, httpPort + 1) httpsBinding.onComplete { case Success(b) ⇒ val localAddress = b.localAddress println(s"Server is listening on https://${localAddress.getHostName}:${localAddress.getPort}") case Failure(e) ⇒ println(s"httpsBinding failed with ${e.getMessage}") system.terminate() System.exit(-1) } // httpBinding.onComplete { // case Success(b) ⇒ // val localAddress = b.localAddress // println(s"Server is listening on http://${localAddress.getHostName}:${localAddress.getPort}") // case Failure(e) ⇒ // println(s"httpBinding failed with ${e.getMessage}") // system.terminate() // System.exit(-1) // } } }
skygoo/octo
src/main/scala/org/seekloud/octo/ptcl/WebSocketSession.scala
package org.seekloud.octo.ptcl import akka.actor.typed.ActorRef import org.seekloud.octo.core.EndPointWorker /** * Created by sky * Date on 2019/8/16 * Time at 15:16 */ case class WebSocketSession( id: String, actor:ActorRef[EndPointWorker.Command], session:ActorRef[BrowserMsg.WsMsg] )
Axiometry/agario-client
src/main/scala/me/axiometry/agario/Agario.scala
<gh_stars>1-10 package me.axiometry.agario import akka.io.IO import akka.pattern.ask import akka.actor.ActorSystem import akka.util.Timeout import spray.can.Http import spray.http._ import HttpHeaders._ import HttpMethods._ import MediaTypes._ import spray.client.pipelining._ import scala.concurrent._ import scala.concurrent.duration._ import scala.util.{ Try, Failure, Success } import grizzled.slf4j.Logging object Agario extends Logging { implicit val system = ActorSystem() import system.dispatcher private[this] implicit val timeout: Timeout = 10 seconds private[this] val ServerPattern = "<option value=\"([a-zA-Z]+)\\-([a-zA-Z]+)\">[a-zA-Z ]+</option>".r def servers: List[Server] = { val submit: HttpRequest => Future[HttpResponse] = sendReceive val response: Future[HttpResponse] = submit(Get("http://agar.io/")) val page = Try(Await.ready(response, timeout.duration)) match { case Success(_) if response.value.get.isSuccess => val res = response.value.get.get Some(res.entity.data.asString) case Success(_) => response.value.get match { case Failure(error) => debug("Request to agar.io failed", error) None case _ => None } case Failure(error) => debug("Request to agar.io failed", error) None case _ => None } page match { case Some(page) => (page split '\n' toList) flatMap { case ServerPattern(region, name) => serverAddr(region, name).map(addr => Server(region, name, addr)) case _ => None } case None => List() } } private[this] def serverAddr(region: String, name: String): Option[ServerAddress] = { debug(s"Sending request about $region-$name") val submit: HttpRequest => Future[HttpResponse] = sendReceive val response: Future[HttpResponse] = submit(Post("http://m.agar.io/", s"$region-$name")) Try(Await.ready(response, timeout.duration)) match { case Success(_) if response.value.get.isSuccess => val res = response.value.get.get res.entity.data.asString.split('\n')(0) match { case ServerAddress(addr) => Some(addr) case str => debug(s"Not an address: $str") None } case Success(_) => response.value.get match { case Failure(error) => debug("Request to m.agar.io failed", error) None case _ => None } case Failure(error) => debug("Request to m.agar.io failed (no response)", error) None case _ => None } } }
Axiometry/agario-client
src/main/scala/me/axiometry/agario/Entity.scala
package me.axiometry.agario

trait Entity {
  def world: World

  def x: Double
  def x_=(x: Double): Unit

  def y: Double
  def y_=(y: Double): Unit
}
Axiometry/agario-client
src/main/scala/me/axiometry/agario/net/DataView.scala
package me.axiometry.agario.net

import akka.util.ByteString

case class DataView(array: Array[Byte], offset: Int, length: Int)(implicit val byteOrder: ByteOrder = ByteOrder.BigEndian) {
  import ByteOrder._

  def getInt8(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Byte = array(off)
  def setInt8(off: Int, value: Byte)(implicit byteOrder: ByteOrder = byteOrder): Unit = array(off) = value

  def getUint8(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Int = array(off) & 0xFF
  def setUint8(off: Int, value: Int)(implicit byteOrder: ByteOrder = byteOrder): Unit = array(off) = value.toByte

  // Multi-byte reads mask each byte with 0xFF so that negative Byte values do not sign-extend.
  def getInt16(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Short = byteOrder match {
    case BigEndian    => (((array(off) & 0xFF) << 8) | (array(off + 1) & 0xFF)).toShort
    case LittleEndian => (((array(off + 1) & 0xFF) << 8) | (array(off) & 0xFF)).toShort
  }
  def setInt16(off: Int, value: Short)(implicit byteOrder: ByteOrder = byteOrder): Unit = byteOrder match {
    case BigEndian =>
      array(off) = ((value >>> 8) & 0xFF).toByte
      array(off + 1) = (value & 0xFF).toByte
    case LittleEndian =>
      array(off + 1) = ((value >>> 8) & 0xFF).toByte
      array(off) = (value & 0xFF).toByte
  }

  def getUint16(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Int = byteOrder match {
    case BigEndian    => ((array(off) & 0xFF) << 8) | (array(off + 1) & 0xFF)
    case LittleEndian => ((array(off + 1) & 0xFF) << 8) | (array(off) & 0xFF)
  }
  def setUint16(off: Int, value: Int)(implicit byteOrder: ByteOrder = byteOrder): Unit = byteOrder match {
    case BigEndian =>
      array(off) = ((value >>> 8) & 0xFF).toByte
      array(off + 1) = (value & 0xFF).toByte
    case LittleEndian =>
      array(off + 1) = ((value >>> 8) & 0xFF).toByte
      array(off) = (value & 0xFF).toByte
  }

  def getInt32(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Int = byteOrder match {
    case BigEndian =>
      ((array(off) & 0xFF) << 24) | ((array(off + 1) & 0xFF) << 16) | ((array(off + 2) & 0xFF) << 8) | (array(off + 3) & 0xFF)
    case LittleEndian =>
      ((array(off + 3) & 0xFF) << 24) | ((array(off + 2) & 0xFF) << 16) | ((array(off + 1) & 0xFF) << 8) | (array(off) & 0xFF)
  }
  def setInt32(off: Int, value: Int)(implicit byteOrder: ByteOrder = byteOrder): Unit = byteOrder match {
    case BigEndian =>
      array(off) = ((value >>> 24) & 0xFF).toByte
      array(off + 1) = ((value >>> 16) & 0xFF).toByte
      array(off + 2) = ((value >>> 8) & 0xFF).toByte
      array(off + 3) = (value & 0xFF).toByte
    case LittleEndian =>
      array(off + 3) = ((value >>> 24) & 0xFF).toByte
      array(off + 2) = ((value >>> 16) & 0xFF).toByte
      array(off + 1) = ((value >>> 8) & 0xFF).toByte
      array(off) = (value & 0xFF).toByte
  }

  def getUint32(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Long = getInt32(off) & 0xFFFFFFFFL
  def setUint32(off: Int, value: Long)(implicit byteOrder: ByteOrder = byteOrder): Unit = setInt32(off, value.toInt)

  // 64-bit access composed from two 32-bit halves, respecting the requested byte order.
  def getInt64(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Long = byteOrder match {
    case BigEndian    => (getUint32(off) << 32) | getUint32(off + 4)
    case LittleEndian => (getUint32(off + 4) << 32) | getUint32(off)
  }
  def setInt64(off: Int, value: Long)(implicit byteOrder: ByteOrder = byteOrder): Unit = byteOrder match {
    case BigEndian =>
      setUint32(off, value >>> 32)
      setUint32(off + 4, value & 0xFFFFFFFFL)
    case LittleEndian =>
      setUint32(off + 4, value >>> 32)
      setUint32(off, value & 0xFFFFFFFFL)
  }

  def getFloat32(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Float = java.lang.Float.intBitsToFloat(getInt32(off))
  def setFloat32(off: Int, value: Float)(implicit byteOrder: ByteOrder = byteOrder): Unit = setInt32(off, java.lang.Float.floatToRawIntBits(value))

  // Doubles occupy all 8 bytes, so they go through the 64-bit accessors rather than the 32-bit ones.
  def getFloat64(off: Int)(implicit byteOrder: ByteOrder = byteOrder): Double = java.lang.Double.longBitsToDouble(getInt64(off))
  def setFloat64(off: Int, value: Double)(implicit byteOrder: ByteOrder = byteOrder): Unit = setInt64(off, java.lang.Double.doubleToRawLongBits(value))

  def toByteString() = ByteString.fromArray(array, offset, length)
}

object DataView {
  def apply(length: Int): DataView = DataView(Array.ofDim[Byte](length), 0, length)
  def apply(array: Array[Byte]): DataView = DataView(array, 0, array.length)
}

sealed trait ByteOrder
object ByteOrder {
  case object LittleEndian extends ByteOrder
  case object BigEndian extends ByteOrder
}
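/* A minimal, hypothetical usage sketch of the DataView above: it writes a 32-bit value in the
 * default big-endian order and reads the same bytes back in both orders, which is a quick way to
 * exercise the endianness handling. The object name, values and offsets are illustrative only and
 * are not part of the agario-client code. */
object DataViewSketch extends App {
  import me.axiometry.agario.net.{ ByteOrder, DataView }

  val view = DataView(8)                                // 8-byte buffer, big-endian by default
  view.setUint32(0, 0xDEADBEEFL)                        // written most-significant byte first
  println(view.getUint32(0))                            // 3735928559 (0xDEADBEEF) read back big-endian
  println(view.getUint32(0)(ByteOrder.LittleEndian))    // same four bytes reinterpreted little-endian
  println(view.toByteString())                          // the underlying bytes as an akka ByteString
}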
Axiometry/agario-client
src/main/scala/me/axiometry/agario/Client.scala
package me.axiometry.agario

import me.axiometry.agario.net._
import akka.actor.{ ActorSystem, Props }
import grizzled.slf4j.Logging
import spray.can.Http
import spray.can.websocket.frame._
import spray.http._
import spray.httpx.RequestBuilding._

case class Client private (val name: String, val server: Server)(implicit system: ActorSystem) extends Logging {
  private[this] val websocket = {
    val ServerAddress(host, port) = server.address
    val connect = Http.Connect(host, port, false)
    val request = (
      Get("/") ~>
        addHeader(HttpHeaders.Host(host, port)) ~>
        addHeader(HttpHeaders.Connection("Upgrade")) ~>
        addHeader("Upgrade", "websocket") ~>
        addHeader("Sec-WebSocket-Version", "13") ~>
        addHeader("Sec-WebSocket-Key", "<KEY>") ~>
        addHeader("Sec-WebSocket-Extensions", "permessage-deflate")
    )

    system.actorOf(Props(
      new WebSocketClient(connect, request) {
        def onConnect() {
          debug("Connected")
          connection ! BinaryFrame {
            val view = DataView(5)
            view.setUint8(0, 254)
            view.setUint32(1, 1)(ByteOrder.LittleEndian)
            view.toByteString()
          }
          connection ! BinaryFrame {
            val view = DataView(5)
            view.setUint8(0, 255)
            view.setUint32(1, 1)(ByteOrder.LittleEndian)
            view.toByteString()
          }
        }

        def onMessage(frame: Frame) {
          val view = DataView(frame.payload.toArray)
          debug("Got packet: " + view.getUint8(0))
        }

        def onClose() {
          debug("Disconnected")
        }
      }
    ))
  }
}
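/* The two handshake frames built in onConnect above are one opcode byte (254, then 255) followed by
 * a 32-bit little-endian value of 1. This standalone sketch only prints that 5-byte layout; the
 * object name, helper and the printing are illustrative and not part of the client itself. */
object HandshakeFrameSketch extends App {
  import me.axiometry.agario.net.{ ByteOrder, DataView }

  def handshakeBytes(opcode: Int): Array[Byte] = {
    val view = DataView(5)
    view.setUint8(0, opcode)                      // opcode in the first byte
    view.setUint32(1, 1)(ByteOrder.LittleEndian)  // value 1, least-significant byte first
    view.array
  }

  println(handshakeBytes(254).mkString(" "))      // -2 1 0 0 0  (254 prints as a signed byte)
  println(handshakeBytes(255).mkString(" "))      // -1 1 0 0 0
}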
Axiometry/agario-client
src/main/scala/me/axiometry/agario/Server.scala
package me.axiometry.agario

import akka.actor.ActorSystem

import scala.util._

case class ServerAddress(host: String, port: Int)

object ServerAddress {
  private[this] val ServerPattern = "([^:]+):([0-9]+)".r

  def unapply(string: String): Option[ServerAddress] = string match {
    case ServerPattern(host, port) =>
      Try(port.toInt) match {
        case Success(port) => Some(ServerAddress(host, port))
        case Failure(_)    => None
      }
    case _ => None
  }
}

case class Server(val region: String, val name: String, val address: ServerAddress)(implicit system: ActorSystem) {
  def createClient(name: String = null): Client = ???
}
Axiometry/agario-client
src/main/scala/me/axiometry/agario/World.scala
package me.axiometry.agario

class World {
}
Axiometry/agario-client
src/main/scala/me/axiometry/agario/net/WebSocketClient.scala
package me.axiometry.agario.net

import akka.actor.ActorSystem
import akka.io.IO
import spray.can.Http
import spray.can.server.UHttp
import spray.can.websocket._
import spray.can.websocket.frame._
import spray.http.HttpRequest

abstract class WebSocketClient(connect: Http.Connect, val upgradeRequest: HttpRequest)(implicit system: ActorSystem) extends WebSocketClientWorker {
  IO(UHttp) ! connect

  def businessLogic: Receive = {
    case UpgradedToWebSocket => onConnect()
    case frame: Frame        => onMessage(frame)
    case _: Http.ConnectionClosed =>
      onClose()
      context.stop(self)
  }

  def onConnect()
  def onMessage(frame: Frame)
  def onClose()
}
Axiometry/agario-client
src/main/scala/me/axiometry/agario/Carb.scala
package me.axiometry.agario

case class Carb(override val world: World) extends Entity {
  override var x: Double = _
  override var y: Double = _
}
sequencer/rocket-dsp-utils
src/main/scala/freechips/rocketchip/jtag2mm/JtagFuzzer.scala
// SPDX-License-Identifier: Apache-2.0 package freechips.rocketchip.jtag2mm import chisel3._ import chisel3.experimental._ import chisel3.util._ import chisel3.util.random.LFSR class InvertedJtagIO extends Bundle { // TRST (4.6) is optional and not currently implemented. val TCK = Output(Bool()) val TMS = Output(Bool()) val TDI = Output(Bool()) val TDO = Input(new Bool()) } class InvertedJtagIOPlus extends InvertedJtagIO { val finished = Output(Bool()) } class JtagFuzzer(irLength: Int, beatBytes: Int, numOfTransfers: Int) extends Module { val io = IO(new InvertedJtagIOPlus) object State extends ChiselEnum { val sIdle, sTCK, sTMS, sTCKandTMS, sNone, sDataTCK, sDataTMS, sDataTCKandTMS, sDataNone = Value } val lfsrAddrReg = RegInit(UInt(16.W), 0.U) lfsrAddrReg := LFSR(14) << 2 val lfsrDataReg = RegInit(UInt(32.W), 0.U) lfsrDataReg := LFSR(32) val dataBitCounter = RegInit(UInt(8.W), 0.U) val idleCycleCounter = RegInit(UInt(4.W), 0.U) val transferCounter = RegInit(UInt(4.W), 0.U) io.finished := Mux(transferCounter >= numOfTransfers.U, true.B, false.B) val state = RegInit(State.sIdle) val stateCounter = RegInit(UInt(10.W), 0.U) when(state =/= RegNext(state)) { stateCounter := stateCounter + 1.U } io.TCK := DontCare io.TMS := DontCare io.TDI := DontCare switch(state) { is(State.sIdle) { when((idleCycleCounter === 10.U) && (transferCounter < numOfTransfers.U)){ state := State.sTMS } io.TCK := false.B io.TMS := false.B io.TDI := false.B idleCycleCounter := idleCycleCounter + 1.U stateCounter := 0.U dataBitCounter := 0.U } is(State.sTMS) { // jtag init when((stateCounter === 0.U) || (stateCounter === 2.U) || (stateCounter === 4.U) || (stateCounter === 6.U) || (stateCounter === 8.U)) { state := State.sTCKandTMS } // first instruction .elsewhen((stateCounter === 12.U) || (stateCounter === 14.U) || (stateCounter === 28.U)){ state := State.sTCKandTMS } // first data .elsewhen((stateCounter === 32.U) || (stateCounter === 70.U)){ state := State.sTCKandTMS } // second instruction .elsewhen((stateCounter === 74.U) || (stateCounter === 76.U) || (stateCounter === 90.U)){ state := State.sTCKandTMS } // second data .elsewhen((stateCounter === 94.U) || (stateCounter === 164.U)){ state := State.sTCKandTMS } // third instruction .elsewhen((stateCounter === 168.U) || (stateCounter === 170.U) || (stateCounter === 184.U)){ state := State.sTCKandTMS } io.TCK := false.B io.TMS := true.B io.TDI := false.B dataBitCounter := 0.U idleCycleCounter := 0.U } is(State.sTCK) { // endings when((stateCounter === 11.U) || (stateCounter === 31.U) || (stateCounter === 73.U) || (stateCounter === 93.U) || (stateCounter === 167.U)) { state := State.sTMS } // first instruction .elsewhen(stateCounter === 17.U){ state := State.sNone } .elsewhen(stateCounter === 19.U) { state := State.sDataNone } // first data .elsewhen(stateCounter === 35.U){ state := State.sNone } .elsewhen(stateCounter === 37.U) { state := State.sDataNone } // second instruction .elsewhen(stateCounter === 79.U){ state := State.sNone } .elsewhen(stateCounter === 81.U) { state := State.sDataNone } // second data .elsewhen(stateCounter === 97.U){ state := State.sNone } .elsewhen(stateCounter === 99.U) { state := State.sDataNone } // third instruction .elsewhen(stateCounter === 173.U){ state := State.sNone } .elsewhen(stateCounter === 175.U) { state := State.sDataNone } // the end .elsewhen(stateCounter === 187.U){ state := State.sIdle transferCounter := transferCounter + 1.U } io.TCK := true.B io.TMS := false.B io.TDI := false.B dataBitCounter := 0.U } is(State.sNone) { // 
jtag init when(stateCounter === 10.U) { state := State.sTCK } // first instruction .elsewhen((stateCounter === 16.U) || (stateCounter === 18.U) || (stateCounter === 30.U)){ state := State.sTCK } // first data .elsewhen((stateCounter === 34.U) || (stateCounter === 36.U) || (stateCounter === 72.U)){ state := State.sTCK } // second instruction .elsewhen((stateCounter === 78.U) || (stateCounter === 80.U) || (stateCounter === 92.U)){ state := State.sTCK } // second data .elsewhen((stateCounter === 96.U) || (stateCounter === 98.U) || (stateCounter === 166.U)){ state := State.sTCK } // third instruction .elsewhen((stateCounter === 172.U) || (stateCounter === 174.U) || (stateCounter === 186.U)){ state := State.sTCK } io.TCK := false.B io.TMS := false.B io.TDI := false.B dataBitCounter := 0.U } is(State.sTCKandTMS) { // jtag init when((stateCounter === 1.U) || (stateCounter === 3.U) || (stateCounter === 5.U) || (stateCounter === 7.U)) { state := State.sTMS } .elsewhen (stateCounter === 9.U) { state := State.sNone } // first instruction .elsewhen((stateCounter === 13.U)){ state := State.sTMS } .elsewhen((stateCounter === 15.U) || (stateCounter === 29.U)){ state := State.sNone } //first data .elsewhen((stateCounter === 33.U) || (stateCounter === 71.U)){ state := State.sNone } // second instruction .elsewhen((stateCounter === 75.U)){ state := State.sTMS } .elsewhen((stateCounter === 77.U) || (stateCounter === 91.U)){ state := State.sNone } //second data .elsewhen((stateCounter === 95.U) || (stateCounter === 165.U)){ state := State.sNone } // third instruction .elsewhen((stateCounter === 169.U)){ state := State.sTMS } .elsewhen((stateCounter === 171.U) || (stateCounter === 185.U)){ state := State.sNone } io.TCK := true.B io.TMS := true.B io.TDI := false.B dataBitCounter := 0.U } is(State.sDataNone) { // first instruction when((stateCounter === 20.U) || (stateCounter === 22.U) || (stateCounter === 24.U)){ state := State.sDataTCK } // first data .elsewhen((stateCounter >= 38.U) && (stateCounter <= 66.U)){ state := State.sDataTCK } // second instruction .elsewhen((stateCounter === 82.U) || (stateCounter === 84.U) || (stateCounter === 86.U)){ state := State.sDataTCK } // second data .elsewhen((stateCounter >= 100.U) && (stateCounter <= 160.U)){ state := State.sDataTCK } // third instruction .elsewhen((stateCounter === 176.U) || (stateCounter === 178.U) || (stateCounter === 180.U)){ state := State.sDataTCK } when(((stateCounter >= 38.U) && (stateCounter <= 66.U)) || ((stateCounter >= 100.U) && (stateCounter <= 160.U))) { dataBitCounter := dataBitCounter + 1.U } io.TCK := false.B io.TMS := false.B when((stateCounter === 22.U) || (stateCounter === 82.U) || (stateCounter === 84.U) || (stateCounter === 176.U)){ io.TDI := true.B } .elsewhen((stateCounter >= 38.U) && (stateCounter <= 66.U)){ io.TDI := lfsrAddrReg(dataBitCounter) } .elsewhen((stateCounter >= 100.U) && (stateCounter <= 160.U)){ io.TDI := lfsrDataReg(dataBitCounter) } .otherwise { io.TDI := false.B } } is(State.sDataTCK) { // first instruction when((stateCounter === 21.U) || (stateCounter === 23.U)){ state := State.sDataNone } .elsewhen(stateCounter === 25.U){ state := State.sDataTMS } // first data .elsewhen((stateCounter >= 39.U) && (stateCounter <= 65.U)){ state := State.sDataNone } .elsewhen(stateCounter === 67.U){ state := State.sDataTMS } // second instruction .elsewhen((stateCounter === 83.U) || (stateCounter === 85.U)){ state := State.sDataNone } .elsewhen(stateCounter === 87.U){ state := State.sDataTMS } // second data 
.elsewhen((stateCounter >= 101.U) && (stateCounter <= 159.U)){ state := State.sDataNone } .elsewhen(stateCounter === 161.U){ state := State.sDataTMS } // third instruction .elsewhen((stateCounter === 177.U) || (stateCounter === 179.U)){ state := State.sDataNone } .elsewhen(stateCounter === 181.U){ state := State.sDataTMS } io.TCK := true.B io.TMS := false.B when((stateCounter === 23.U) || (stateCounter === 83.U) || (stateCounter === 85.U) || (stateCounter === 177.U)){ io.TDI := true.B } .elsewhen((stateCounter >= 39.U) && (stateCounter <= 67.U)){ io.TDI := lfsrAddrReg(dataBitCounter-1.U) } .elsewhen((stateCounter >= 101.U) && (stateCounter <= 161.U)){ io.TDI := lfsrDataReg(dataBitCounter) } .otherwise { io.TDI := false.B } } is(State.sDataTMS) { // first instruction when((stateCounter === 26.U)){ state := State.sDataTCKandTMS } // first data .elsewhen((stateCounter === 68.U)){ state := State.sDataTCKandTMS } // second instruction .elsewhen((stateCounter === 88.U)){ state := State.sDataTCKandTMS } // second data .elsewhen((stateCounter === 162.U)){ state := State.sDataTCKandTMS } // third instruction .elsewhen((stateCounter === 182.U)){ state := State.sDataTCKandTMS } io.TCK := false.B io.TMS := true.B when(stateCounter === 68.U) { io.TDI := lfsrAddrReg(15) } .elsewhen(stateCounter === 162.U) { io.TDI := lfsrDataReg(31) } .otherwise { io.TDI := false.B } } is(State.sDataTCKandTMS) { // first instruction when((stateCounter === 27.U)){ state := State.sTMS } // first data .elsewhen((stateCounter === 69.U)){ state := State.sTMS } // second instruction .elsewhen((stateCounter === 89.U)){ state := State.sTMS } // second data .elsewhen((stateCounter === 163.U)){ state := State.sTMS } // third instruction .elsewhen((stateCounter === 183.U)){ state := State.sTMS } io.TCK := true.B io.TMS := true.B when(stateCounter === 69.U) { io.TDI := lfsrAddrReg(15) } .elsewhen(stateCounter === 163.U) { io.TDI := lfsrDataReg(31) } .otherwise { io.TDI := false.B } } } }
sequencer/rocket-dsp-utils
build.sbt
<gh_stars>1-10 // SPDX-License-Identifier: Apache-2.0 git.remoteRepo := "<EMAIL>:ucb-bar/dsptools.git" enablePlugins(SiteScaladocPlugin) enablePlugins(GhpagesPlugin) def scalacOptionsVersion(scalaVersion: String): Seq[String] = { Seq() ++ { // If we're building with Scala > 2.11, enable the compile option // switch to support our anonymous Bundle definitions: // https://github.com/scala/bug/issues/10047 CrossVersion.partialVersion(scalaVersion) match { case Some((2, scalaMajor: Long)) if scalaMajor < 12 => Seq() case _ => Seq("-Xsource:2.11") } } } def javacOptionsVersion(scalaVersion: String): Seq[String] = { Seq() ++ { // Scala 2.12 requires Java 8. We continue to generate // Java 7 compatible code for Scala 2.11 // for compatibility with old clients. CrossVersion.partialVersion(scalaVersion) match { case Some((2, scalaMajor: Long)) if scalaMajor < 12 => Seq("-source", "1.7", "-target", "1.7") case _ => Seq("-source", "1.8", "-target", "1.8") } } } // Provide a managed dependency on X if -DXVersion="" is supplied on the command line. val defaultVersions = Map( "chisel-iotesters" -> "1.5.3", "dsptools" -> "1.4.3", "rocketchip" -> "1.2-SNAPSHOT", "api-config-chipsalliance"-> "1.2-SNAPSHOT" ) name := "rocket-dsp-utils" val commonSettings = Seq( organization := "edu.berkeley.cs", version := "0.5-SNAPSHOT", autoAPIMappings := true, scalaVersion := "2.12.13", crossScalaVersions := Seq("2.12.13"), scalacOptions ++= Seq("-unchecked", "-deprecation", "-feature", "-language:reflectiveCalls") ++ scalacOptionsVersion(scalaVersion.value), javacOptions ++= javacOptionsVersion(scalaVersion.value), pomExtra := (<url>http://chisel.eecs.berkeley.edu/</url> <licenses> <license> <name>apache_v2</name> <url>https://opensource.org/licenses/Apache-2.0</url> <distribution>repo</distribution> </license> </licenses> <developers> <developer> <id>grebe</id> <name><NAME></name> <url>http://www.eecs.berkeley.edu/~rigge/</url> </developer> <developer> <id>shunshou</id> <name><NAME></name> <url>https://www.linkedin.com/in/angie-wang-ee/</url> </developer> <developer> <id>chick</id> <name><NAME></name> <url>https://aspire.eecs.berkeley.edu/author/chick/</url> </developer> </developers>), publishTo := { val v = version.value val nexus = "https://oss.sonatype.org/" if (v.trim.endsWith("SNAPSHOT")) { Some("snapshots" at nexus + "content/repositories/snapshots") } else { Some("releases" at nexus + "service/local/staging/deploy/maven2") } }, resolvers ++= Seq ( Resolver.sonatypeRepo("snapshots"), Resolver.sonatypeRepo("releases") ), // scala-steward:on libraryDependencies ++= Seq( "org.typelevel" %% "spire" % "0.16.2", "org.scalanlp" %% "breeze" % "1.1", "org.scalatest" %% "scalatest" % "3.2.8" % "test", ), ) val rocketSettings = Seq( name := "rocket-dsp-utils", libraryDependencies ++= defaultVersions.map { case (dep, version) => "edu.berkeley.cs" %% dep % sys.props.getOrElse(dep + "Version", version) }.toSeq, Test / parallelExecution := false, crossScalaVersions := Seq("2.12.13"), ) publishMavenStyle := true publishArtifact in Test := false pomIncludeRepository := { x => false } val `rocket-dsp-utils` = (project in file(".")). settings(commonSettings: _*). settings(rocketSettings: _*)
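/* The rocketSettings block above resolves each dependency version from a JVM system property named
 * "<dep>Version", falling back to the pinned entry in defaultVersions. Below is a minimal sketch of
 * that lookup, runnable on its own; the object name and the idea of overriding with something like
 * "sbt -DrocketchipVersion=... compile" are only an example of how the build could be invoked. */
object DependencyVersionSketch extends App {
  val defaultVersions = Map(
    "chisel-iotesters" -> "1.5.3",
    "dsptools"         -> "1.4.3",
    "rocketchip"       -> "1.2-SNAPSHOT"
  )

  // Same rule the build uses: a -D<dep>Version=... property wins, otherwise the default applies.
  def resolvedVersion(dep: String): String =
    sys.props.getOrElse(dep + "Version", defaultVersions(dep))

  defaultVersions.keys.foreach(dep => println(s"$dep -> ${resolvedVersion(dep)}"))
}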
sequencer/rocket-dsp-utils
src/test/scala/freechips/rocketchip/jtag2mm/JtagFuzzerTester.scala
<filename>src/test/scala/freechips/rocketchip/jtag2mm/JtagFuzzerTester.scala // SPDX-License-Identifier: Apache-2.0 package freechips.rocketchip.jtag2mm import dsptools.DspTester import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class JtagFuzzerTester(dut: JtagFuzzer) extends DspTester(dut) { step(10) step(5) step(2500) } class JtagFuzzerSpec extends AnyFlatSpec with Matchers { def dut(irLength: Int, beatBytes: Int, numOfTransfers: Int): () => JtagFuzzer = () => { new JtagFuzzer(irLength, beatBytes, numOfTransfers) } val beatBytes = 4 val irLength = 4 val numOfTransfers = 10 it should "Test JTAG Fuzzer" in { //chisel3.iotesters.Driver.execute(Array("-tiwv", "-tbn", "verilator", "-tivsuv"), () => dut) { c => chisel3.iotesters.Driver.execute(Array("-tbn", "verilator"), dut(irLength, beatBytes, numOfTransfers)) { c => new JtagFuzzerTester(c) } should be(true) } }
sequencer/rocket-dsp-utils
src/main/scala/package.scala
//val config = chipsalliance.rocketchip.config
sequencer/rocket-dsp-utils
src/main/scala/freechips/rocketchip/amba/axi4stream/SimpleSplitter.scala
package freechips.rocketchip.amba.axi4stream import chisel3._ import freechips.rocketchip.amba.axi4stream.{AXI4StreamMasterPortParameters, AXI4StreamNexusNode, AXI4StreamSlavePortParameters} import freechips.rocketchip.config.Parameters import freechips.rocketchip.diplomacy._ class SimpleSplitter() extends LazyModule()(Parameters.empty) { val node = AXI4StreamNexusNode( masterFn = { seq => seq.reduce({ (a: AXI4StreamMasterPortParameters, b: AXI4StreamMasterPortParameters) => AXI4StreamMasterPortParameters(a.masterParams.union(b.masterParams))}) }, slaveFn = { seq => seq.reduce({ (_: AXI4StreamSlavePortParameters, b: AXI4StreamSlavePortParameters) => AXI4StreamSlavePortParameters (b.slaveParams .union(b.slaveParams)) }) } ) lazy val module = new LazyModuleImp(this) { require(node.in.length == 1, "Only one input to splitter allowed") val (in, _) = node.in.head in.ready := true.B node.out.foreach { case (out, edge) => require(edge.slave.slaveParams.alwaysReady) out.valid := in.valid out.bits := in.bits assert(!reset.asBool || in.ready) } } } object SimpleSplitter { def apply()(implicit p: Parameters): AXI4StreamNexusNode = { val splitter = LazyModule(new SimpleSplitter()) splitter.node } }
sequencer/rocket-dsp-utils
src/main/scala/freechips/rocketchip/interrupts/Nodes.scala
package freechips.rocketchip.interrupts import chisel3._ import chipsalliance.rocketchip.config.Parameters import freechips.rocketchip.diplomacy._ object IntBundleBridgeImp extends BundleBridgeImp[Vec[Bool]] case class IntToBundleBridgeNode(sinkParams: IntSinkPortParameters)(implicit valName: ValName) extends MixedAdapterNode(IntImp, IntBundleBridgeImp)( dFn = { sourceParams => BundleBridgeParams(() => Vec(sinkParams.sinks.length, Bool())) }, uFn = { sp => sinkParams } ) class IntToBundleBridge(slaveParams: IntSinkPortParameters)(implicit p: Parameters) extends LazyModule { val node = IntToBundleBridgeNode(slaveParams) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, _), (out, _)) => out := in } } } object IntToBundleBridge { def apply(sinkParams: IntSinkPortParameters)(implicit p: Parameters): IntToBundleBridgeNode = { val converter = LazyModule(new IntToBundleBridge(sinkParams)) converter.node } } case class BundleBridgeToIntNode(sourceParams: IntSourcePortParameters) (implicit valName: ValName) extends MixedAdapterNode(IntBundleBridgeImp, IntImp)( dFn = {sinkParams => sourceParams}, uFn = { slaveParams => BundleBridgeParams(None) } ) class BundleBridgeToInt(sourceParams: IntSourcePortParameters)(implicit p: Parameters) extends LazyModule { val node = BundleBridgeToIntNode(sourceParams) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, _), (out, _)) => out := in } } } object BundleBridgeToInt { def apply(sourceParams: IntSourcePortParameters)(implicit p: Parameters): BundleBridgeToIntNode = { val converter = LazyModule(new BundleBridgeToInt(sourceParams)) converter.node } }
sequencer/rocket-dsp-utils
src/main/scala/freechips/rocketchip/jtag2mm/Utils.scala
// See ./LICENSE for license details. package freechips.rocketchip.jtag2mm import chisel3._ import chisel3.util._ // This code was taken from https://github.com/ucb-art/chisel-jtag/blob/master/src/main/scala/jtag/Utils.scala and adjusted to our design needs /** Bundle representing a tristate pin. */ class Tristate extends Bundle { val data = Bool() val driven = Bool() // active high, pin is hi-Z when driven is low } class NegativeEdgeLatch[T <: Data](dataType: T) extends Module { class IoClass extends Bundle { val next = Input(dataType) val enable = Input(Bool()) val output = Output(dataType) } val io = IO(new IoClass) val reg = Reg(dataType) when(io.enable) { reg := io.next } io.output := reg } /** Generates a register that updates on the falling edge of the input clock signal. */ object NegativeEdgeLatch { def apply[T <: Data](modClock: Clock, next: T, enable: Bool = true.B): T = { // TODO better init passing once in-module multiclock support improves val latch_module = withClock((!(modClock.asUInt)).asClock) { Module(new NegativeEdgeLatch(chiselTypeOf(next))) } latch_module.io.next := next latch_module.io.enable := enable latch_module.io.output } } /** A module that counts transitions on the input clock line, used as a basic sanity check and * debug indicator clock-crossing designs. */ class ClockedCounter(counts: BigInt, init: Option[BigInt]) extends Module { require(counts > 0, "really?") val width = log2Ceil(counts) class CountIO extends Bundle { val count = Output(UInt(width.W)) } val io = IO(new CountIO) val count = init match { case Some(init) => RegInit(UInt(width.W), init.U) case None => Reg(UInt(width.W)) } when(count === (counts - 1).asUInt) { count := 0.U }.otherwise { count := count + 1.U } io.count := count } /** Count transitions on the input bit by specifying it as a clock to a counter. */ object ClockedCounter { def apply(data: Bool, counts: BigInt, init: BigInt): UInt = { val counter = withClock(data.asClock) { Module(new ClockedCounter(counts, Some(init))) } counter.io.count } def apply(data: Bool, counts: BigInt): UInt = { val counter = withClock(data.asClock) { Module(new ClockedCounter(counts, None)) } counter.io.count } }
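/* A minimal, hypothetical wrapper showing how the two helpers above are meant to be used:
 * NegativeEdgeLatch captures a value on the falling edge of an externally supplied clock, and
 * ClockedCounter counts transitions of an arbitrary Bool by treating it as a clock. The module
 * name and port names here are made up for illustration. */
import chisel3._
import freechips.rocketchip.jtag2mm._

class FallingEdgeSampler extends Module {
  val io = IO(new Bundle {
    val sampleClock = Input(Clock())   // clock whose falling edge captures io.in
    val in          = Input(UInt(8.W))
    val strobe      = Input(Bool())    // signal whose transitions are counted
    val out         = Output(UInt(8.W))
    val edges       = Output(UInt(4.W))
  })

  // Latch io.in on the falling edge of io.sampleClock (enable defaults to true.B).
  io.out := NegativeEdgeLatch(io.sampleClock, io.in)

  // Count io.strobe transitions modulo 16, starting from 0.
  io.edges := ClockedCounter(io.strobe, counts = 16, init = 0)
}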
sequencer/rocket-dsp-utils
src/test/scala/dspblocks/DspBlockFirSpec.scala
<reponame>sequencer/rocket-dsp-utils<filename>src/test/scala/dspblocks/DspBlockFirSpec.scala // SPDX-License-Identifier: Apache-2.0 package dspblocks import chisel3._ import chisel3.iotesters._ import chisel3.util.Cat import freechips.rocketchip.amba.axi4._ import freechips.rocketchip.amba.axi4stream._ import freechips.rocketchip.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.regmapper._ import org.scalatest.freespec.AnyFreeSpec class MyManyDynamicElementVecFir(length: Int) extends Module { val io = IO(new Bundle { val in = Input(UInt(8.W)) val valid = Input(Bool()) val out = Output(UInt(8.W)) val consts = Input(Vec(length, UInt(8.W))) }) // Such concision! You'll learn what all this means later. val taps = Seq(io.in) ++ Seq.fill(io.consts.length - 1)(RegInit(0.U(8.W))) taps.zip(taps.tail).foreach { case (a, b) => when(io.valid) { b := a } } io.out := taps.zip(io.consts).map { case (a, b) => a * b }.reduce(_ + _) } // // Base class for all FIRBlocks. // This can be extended to make TileLink, AXI4, APB, AHB, etc. flavors of the FIR filter // abstract class FIRBlock[D, U, EO, EI, B <: Data](val nFilters: Int, val nTaps: Int)(implicit p: Parameters) // HasCSR means that the memory interface will be using the RegMapper API to define status and control registers extends DspBlock[D, U, EO, EI, B] with HasCSR { // diplomatic node for the streaming interface // identity node means the output and input are parameterized to be the same val streamNode = AXI4StreamIdentityNode() // define the what hardware will be elaborated lazy val module = new LazyModuleImp(this) { // get streaming input and output wires from diplomatic node val (in, _) = streamNode.in.head val (out, _) = streamNode.out.head require( in.params.n >= nFilters, s"""AXI-4 Stream port must be big enough for all |the filters (need $nFilters,, only have ${in.params.n})""".stripMargin ) // make registers to store taps val taps = Reg(Vec(nFilters, Vec(nTaps, UInt(8.W)))) // memory map the taps, plus the first address is a read-only field that says how many filter lanes there are val mmap = Seq( RegField.r(64, nFilters.U, RegFieldDesc("nFilters", "Number of filter lanes")) ) ++ taps.flatMap(_.map(t => RegField(8, t, RegFieldDesc("tap", "Tap")))) // generate the hardware for the memory interface // in this class, regmap is abstract (unimplemented). 
mixing in something like AXI4HasCSR or TLHasCSR // will define regmap for the particular memory interface regmap(mmap.zipWithIndex.map({ case (r, i) => i * 8 -> Seq(r) }): _*) // make the FIR lanes and connect inputs and taps val outs = for (i <- 0 until nFilters) yield { val fir = Module(new MyManyDynamicElementVecFir(nTaps)) fir.io.in := in.bits.data((i + 1) * 8, i * 8) fir.io.valid := in.valid && out.ready fir.io.consts := taps(i) fir.io.out } val output = if (outs.length == 1) { outs.head } else { outs.reduce((x: UInt, y: UInt) => Cat(y, x)) } out.bits.data := output in.ready := out.ready out.valid := in.valid } } // make AXI4 flavor of FIRBlock abstract class AXI4FIRBlock(nFilters: Int, nTaps: Int)(implicit p: Parameters) extends FIRBlock[AXI4MasterPortParameters, AXI4SlavePortParameters, AXI4EdgeParameters, AXI4EdgeParameters, AXI4Bundle](nFilters, nTaps) with AXI4DspBlock with AXI4HasCSR { override val mem = Some( AXI4RegisterNode( AddressSet(0x0, 0xffffL), beatBytes = 8 ) ) } // running the code below will show what firrtl is generated // note that LazyModules aren't really chisel modules- you need to call ".module" on them when invoking the chisel driver // also note that AXI4StandaloneBlock is mixed in- if you forget it, you will get weird diplomacy errors because the memory // interface expects a master and the streaming interface expects to be connected. AXI4StandaloneBlock will add top level IOs // println(chisel3.Driver.emit(() => LazyModule(new AXI4FIRBlock(1, 8)(Parameters.empty) with AXI4StandaloneBlock).module)) import dsptools.tester.MemMasterModel abstract class FIRBlockTester[D, U, EO, EI, B <: Data](c: FIRBlock[D, U, EO, EI, B]) extends PeekPokeTester(c.module) with MemMasterModel { // check that address 0 is the number of filters require(memReadWord(0) == c.nFilters) // write 1 to all the taps for (i <- 0 until c.nFilters * c.nTaps) { memWriteWord(8 + i * 8, 1) } } // specialize the generic tester for axi4 class AXI4FIRBlockTester(c: AXI4FIRBlock with AXI4StandaloneBlock) extends FIRBlockTester(c) with AXI4MasterModel { def memAXI = c.ioMem.get } class DspBlockFirSpec extends AnyFreeSpec { "should run" in { // invoking testers on lazymodules is a little strange. // note that the firblocktester takes a lazymodule, not a module (it calls .module in "extends PeekPokeTester()"). val lm = LazyModule(new AXI4FIRBlock(1, 8)(Parameters.empty) with AXI4StandaloneBlock) chisel3.iotesters.Driver(() => lm.module) { _ => new AXI4FIRBlockTester(lm) } } }
sequencer/rocket-dsp-utils
src/test/scala/dsptools/tester/MemMasterSpec.scala
package dsptools.tester import chisel3._ import chisel3.iotesters.PeekPokeTester import freechips.rocketchip.amba.apb._ import freechips.rocketchip.amba.axi4._ import freechips.rocketchip.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.interrupts._ import freechips.rocketchip.regmapper._ import freechips.rocketchip.tilelink._ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers trait RegmapExample extends HasRegMap { val r0 = RegInit(0.U(64.W)) val r1 = RegInit(1.U(64.W)) regmap( 0x00 -> Seq(RegField(64, r0)), 0x08 -> Seq(RegField(64, r1)), 0x10 -> Seq(RegField(64, r0)), 0x18 -> Seq(RegField(64, r1)), ) } class TLRegmapExample extends TLRegisterRouter(0, "example", Seq("dsptools", "example"), beatBytes = 8, interrupts = 1)( new TLRegBundle(null, _))( new TLRegModule(null, _, _) with RegmapExample)(Parameters.empty) { //TODO: CHIPYARD check usage of echoFields, requestFields, responseFields def standaloneParams = { TLBundleParameters( addressBits = 64, dataBits = 64, sourceBits = 1, sinkBits = 1, sizeBits = 6, echoFields = Seq.empty, requestFields = Seq.empty, responseFields = Seq.empty, hasBCE = false) } val ioMemNode = BundleBridgeSource(() => TLBundle(standaloneParams)) node := BundleBridgeToTL(TLClientPortParameters(Seq(TLClientParameters("bundleBridgeToTL")))) := ioMemNode val ioMem = InModuleBody { ioMemNode.makeIO() } val ioIntNode = BundleBridgeSink[Vec[Bool]]() ioIntNode := IntToBundleBridge(IntSinkPortParameters(Seq(IntSinkParameters()))) := intnode val ioInt = InModuleBody { import chisel3.experimental.IO val io = IO(Output(ioIntNode.bundle.cloneType)) io.suggestName("int") io := ioIntNode.bundle io } } class AXI4RegmapExample extends AXI4RegisterRouter(0, beatBytes = 8, interrupts = 1)( new AXI4RegBundle(null, _))( new AXI4RegModule(null, _, _) with RegmapExample)(Parameters.empty) { def standaloneParams = { //TODO: CHIPYARD, check values for echoFields, requestFields, and responseFields AXI4BundleParameters(addrBits = 64, dataBits = 64, idBits = 1 // userBits = 0, // wcorrupt = false ) } val ioMemNode = BundleBridgeSource(() => AXI4Bundle(standaloneParams)) node := BundleBridgeToAXI4(AXI4MasterPortParameters(Seq(AXI4MasterParameters("bundleBridgeToAXI4")))) := ioMemNode val ioMem = InModuleBody { ioMemNode.makeIO() } val ioIntNode = BundleBridgeSink[Vec[Bool]]() ioIntNode := IntToBundleBridge(IntSinkPortParameters(Seq(IntSinkParameters()))) := intnode val ioInt = InModuleBody { import chisel3.experimental.IO val io = IO(Output(ioIntNode.bundle.cloneType)) io.suggestName("int") io := ioIntNode.bundle io } } class APBRegmapExample extends APBRegisterRouter(0, beatBytes = 8, interrupts = 1)( new APBRegBundle(null, _))( new APBRegModule(null, _, _) with RegmapExample)(Parameters.empty) { def standaloneParams = APBBundleParameters(addrBits = 64, dataBits = 64) val ioMemNode = BundleBridgeSource(() => APBBundle(standaloneParams)) node := BundleBridgeToAPB(APBMasterPortParameters(Seq(APBMasterParameters("bundleBridgeToAPB")))) := ioMemNode val ioMem = InModuleBody { ioMemNode.makeIO() } val ioIntNode = BundleBridgeSink[Vec[Bool]]() ioIntNode := IntToBundleBridge(IntSinkPortParameters(Seq(IntSinkParameters()))) := intnode val ioInt = InModuleBody { import chisel3.experimental.IO val io = IO(Output(ioIntNode.bundle.cloneType)) io.suggestName("int") io := ioIntNode.bundle io } } class MemMasterSpec extends AnyFlatSpec with Matchers { abstract class RegmapExampleTester[M <: MultiIOModule](c: M) extends 
PeekPokeTester(c) with MemMasterModel { memReadWord(0x00) should be (0) memReadWord(0x08) should be (1) memReadWord(0x10) should be (0) memReadWord(0x18) should be (1) memWriteWord(0, 10) memWriteWord(8, 5) memReadWord(0x00) should be (10) memReadWord(0x08) should be (5) memReadWord(0x10) should be (10) memReadWord(0x18) should be (5) } class TLRegmapExampleTester(c: TLRegmapExample) extends RegmapExampleTester(c.module) with TLMasterModel { def memTL = c.ioMem } class AXI4RegmapExampleTester(c: AXI4RegmapExample) extends RegmapExampleTester(c.module) with AXI4MasterModel { def memAXI = c.ioMem } class APBRegmapExampleTester(c: APBRegmapExample) extends RegmapExampleTester(c.module) with APBMasterModel { def memAPB = c.ioMem } behavior of "MemMaster Tester" // The following test is ignored, since it currently (8/23/19) fails with: // [info] [0.008] Assertion failed: 'A' channel Get carries invalid source ID (connected at MemMasterSpec.scala:35:8) // [info] [0.009] at Monitor.scala:73 assert (source_ok, "'A' channel Get carries invalid source ID" + extra) it should "work with TileLink" ignore { lazy val dut = LazyModule(new TLRegmapExample) // use verilog b/c of verilog blackboxes in TileLink things assert(chisel3.iotesters.Driver.execute(Array[String]("-tbn", "verilator"), () => dut.module) { c => new TLRegmapExampleTester(dut) }) } it should "work with AXI-4" in { lazy val dut = LazyModule(new AXI4RegmapExample) assert(chisel3.iotesters.Driver.execute(Array[String](), () => dut.module) { c => new AXI4RegmapExampleTester(dut) }) } it should "work with APB" in { lazy val dut = LazyModule(new APBRegmapExample) assert(chisel3.iotesters.Driver.execute(Array[String](), () => dut.module) { c => new APBRegmapExampleTester(dut) }) } }
tumsgis/veganmentor
src/test/scala/VeganMentorDbIntegrationTest.scala
<reponame>tumsgis/veganmentor import java.time.LocalDateTime import DataStructure.{Mentee, Mentor} import junit.framework.TestCase import org.junit.Assert._ import org.junit.{After, Before, Test} class VeganMentorDbIntegrationTest extends TestCase { @Before override def setUp(): Unit = MongoDbRepo.dropAllParticipants() @After override def tearDown(): Unit = assertTrue("Some Mentor empty slots are below zero", TestUtil.noEmptySlotsBelowZero) private object testData { val timestamp = LocalDateTime.now val email = "<EMAIL>" val email2 = "<EMAIL>" val name = "name" val note = "note" val approvedTermsAndConditions = true val approvedSlots = 2 } @Test def testSave(): Unit = { import testData._ val mentor: Mentor = new Mentor(timestamp, email, name, note, approvedTermsAndConditions, approvedSlots) MongoDbRepo.saveParticipant(mentor) MongoDbRepo.getMentorByEmail("<EMAIL>").foreach(m => { assertNotEquals(None, m.id) assertEquals(timestamp, m.timestamp) assertEquals(email, m.email) assertEquals(name, m.name) assertEquals(note, m.note) assertTrue(approvedTermsAndConditions) assertEquals(approvedSlots, m.approvedSlots) assertEquals(approvedSlots, m.emptySlots) }) } @Test def testProcessAndUpdate(): Unit = { import testData._ val mentee: Mentee = new Mentee(timestamp, email, name, note, approvedTermsAndConditions) MongoDbRepo.saveParticipant(mentee) MongoDbRepo.getMenteeByEmail("<EMAIL>").foreach(m => { assertNotEquals(None, m.id) assertEquals(timestamp, m.timestamp) assertEquals(email, m.email) assertEquals(name, m.name) assertEquals(note, m.note) assertTrue(approvedTermsAndConditions) assertEquals(None, m.mentorId) }) val mentor: Mentor = new Mentor(timestamp, email2, name, note, approvedTermsAndConditions, approvedSlots) MongoDbRepo.saveParticipant(mentor) VeganMentor.process val mentorFromDb: Mentor = MongoDbRepo.getMentorByEmail(email2).get assertEquals(approvedSlots, mentorFromDb.approvedSlots) assertEquals(approvedSlots - 1, mentorFromDb.emptySlots) val updateMentee: Mentee = Mentee( mentee.id, mentee.timestamp, mentee.email, mentee.name, mentee.note, mentee.approvedTermsAndConditions, mentorFromDb.id) MongoDbRepo.updateParticipant(updateMentee) MongoDbRepo.getMenteeByEmail("<EMAIL>").foreach(m => { assertNotEquals(None, m.id) assertEquals(timestamp, m.timestamp) assertEquals(email, m.email) assertEquals(name, m.name) assertEquals(note, m.note) assertTrue(approvedTermsAndConditions) assertEquals(mentorFromDb.id, m.mentorId) }) } @Test def testProcessingTwoFiles(): Unit = { val testFile4 = "src/test/files/Vegan_mentor_test_04.csv" VeganMentor.saveInputFileToDb(testFile4) VeganMentor.process assertEquals(3, MongoDbRepo.getAllMentors.size) assertEquals(5, MongoDbRepo.getAllMentees.size) // Process another file val testFile5 = "src/test/files/Vegan_mentor_test_05.csv" VeganMentor.saveInputFileToDb(testFile5) VeganMentor.process assertEquals(3, MongoDbRepo.getAllMentors.size) assertEquals(7, MongoDbRepo.getAllMentees.size) assertEquals(1, MongoDbRepo.getMenteesSeekingMentor.size) } @Test def testProcessingSameFileTwice(): Unit = { val testFile4 = "src/test/files/Vegan_mentor_test_04.csv" VeganMentor.saveInputFileToDb(testFile4) VeganMentor.process assertEquals(3, MongoDbRepo.getAllMentors.size) assertEquals(5, MongoDbRepo.getAllMentees.size) // Process another file val testFile5 = "src/test/files/Vegan_mentor_test_04.csv" VeganMentor.saveInputFileToDb(testFile5) VeganMentor.process assertEquals(3, MongoDbRepo.getAllMentors.size) assertEquals(5, MongoDbRepo.getAllMentees.size) } }
tumsgis/veganmentor
src/main/scala/VeganMentor.scala
import DataStructure._ import scala.annotation.tailrec object VeganMentor { def saveInputFileToDb(csvFilePath: String): Unit = { val (mentors, mentees) = Parser.parseInputFile(csvFilePath) mentors.foreach(MongoDbRepo.saveParticipant) mentees.foreach(MongoDbRepo.saveParticipant) } def process: Unit = { val mentors = MongoDbRepo.getMentorsWithEmptySlots val mentees = MongoDbRepo.getMenteesSeekingMentor val sortedQueue = makeSortedQueue(mentors, mentees) val pairingResult = pairParticipants(sortedQueue.mentors, sortedQueue.mentees) pairingResult.pairedParticipants.mentors.foreach(MongoDbRepo.updateParticipant) } def pairParticipants(mentors: Seq[Mentor], mentees: Seq[Mentee]): PairingResult = { def mentorSlotFull(mentor: Mentor): Boolean = if (mentor.emptySlots == 0) true else if (mentor.approvedSlots == mentor.emptySlots) mentor.mentees.size - mentor.approvedSlots == 0 else mentor.mentees.size - mentor.emptySlots == 0 // Iterate through every mentor, assigning to it 1 mentee per iteration. // When the mentors iteration is over, another iteration is started, and again, and again // until all mentees have been assigned a mentor (if that's possible that is). // This way a fair draft is ensured and the odds of someone getting left out is minimized. // Note: Mentor <--> Mentee is a One-to-many relation. @tailrec def pairParticiPants(mentorsIter: Seq[Mentor], menteesIter: Seq[Mentee], mentorsChanged: Seq[Mentor]): PairedParticipants = (mentorsIter, menteesIter, mentorsChanged) match { // All mentees have been assigned a mentor case (_ , Nil, _) => PairedParticipants(mentorsChanged, None) // All mentor slots are full case (_, _, meChanged) if meChanged.forall(mentorSlotFull) => PairedParticipants(meChanged, Some(menteesIter)) // Another iteration through mentors is needed case (Nil, _, _) => pairParticiPants(mentorsChanged, menteesIter, mentorsChanged) // The first mentor in the iteration list has all the slots full case (menIter, _, _) if mentorSlotFull(menIter.head) => pairParticiPants(menIter.tail, menteesIter, mentorsChanged) // Assign mentee to a mentor case _ => pairParticiPants( mentorsIter.tail, menteesIter.tail, mentorsChanged.map(m => if (m.email == mentorsIter.head.email && m.timestamp == mentorsIter.head.timestamp) m.assignMentee(menteesIter.head) else m).toList) } val nonApproved: NonApproved = NonApproved(mentors.filter(!_.approvedTermsAndConditions), mentees.filter(!_.approvedTermsAndConditions)) val approved = (mentors.filter(_.approvedTermsAndConditions), mentees.filter(_.approvedTermsAndConditions)) PairingResult(pairParticiPants(approved._1, approved._2, approved._1), nonApproved) } def makePairingReport(pairingResult: PairingResult): PairingReport = { val pairedMentors: Seq[Mentor] = pairingResult.pairedParticipants.mentors.filter(_.mentees.nonEmpty) val pairedMentees: Seq[Mentee] = pairedMentors.flatMap(p => p.mentees.map(m => m.assignMentor(p))) val mentorsWaiting = pairingResult.pairedParticipants.mentors.filter(_.mentees.isEmpty) val menteesWaiting = pairingResult.pairedParticipants.menteesWaitingList.getOrElse(Seq()) PairingReport(pairedMentors, pairedMentees, mentorsWaiting, menteesWaiting, pairingResult.nonApproved) } def makeSortedQueue (mentorsUnordered: Seq[Mentor], menteesUnOrdered: Seq[Mentee]): SortedQueue = { // Erasing duplicates and then sorting. 
val mentorsOrdered = mentorsUnordered .groupBy(_.email) .map(m => m._2.head).toList .sortWith((m1, m2) => m1.emptySlots < m2.emptySlots) .sortWith((m1, m2) => m1.timestamp.isBefore (m2.timestamp)) val menteesOrdered = menteesUnOrdered .groupBy (_.email) .map(m => m._2.head).toList .sortWith((m1, m2) => m1.timestamp.isBefore (m2.timestamp) ) SortedQueue (mentorsOrdered, menteesOrdered) } }
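/* A small, self-contained sketch of the round-robin pairing described in the comment inside
 * pairParticipants: two mentors with one approved slot each and three mentees, run through
 * makeSortedQueue and pairParticipants without touching MongoDB. The emails and names are invented
 * purely for illustration. */
import java.time.LocalDateTime

import DataStructure.{Mentee, Mentor}

object PairingSketch extends App {
  val now = LocalDateTime.now

  val mentors = Seq(
    new Mentor(now.minusDays(2), "mentor1@example.org", "Mentor One", "", approvedTermsAndConditions = true, approvedSlots = 1),
    new Mentor(now.minusDays(1), "mentor2@example.org", "Mentor Two", "", approvedTermsAndConditions = true, approvedSlots = 1)
  )
  val mentees = Seq(
    new Mentee(now.minusHours(3), "mentee1@example.org", "Mentee One", "", approvedTermsAndConditions = true),
    new Mentee(now.minusHours(2), "mentee2@example.org", "Mentee Two", "", approvedTermsAndConditions = true),
    new Mentee(now.minusHours(1), "mentee3@example.org", "Mentee Three", "", approvedTermsAndConditions = true)
  )

  val queue  = VeganMentor.makeSortedQueue(mentors, mentees)
  val result = VeganMentor.pairParticipants(queue.mentors, queue.mentees)
  val report = VeganMentor.makePairingReport(result)

  // With two single-slot mentors and three mentees, one mentee ends up on the waiting list.
  report.pairedMentors.foreach(m => println(s"${m.email} -> ${m.mentees.map(_.email).mkString(", ")}"))
  report.menteesWaiting.foreach(m => println(s"waiting: ${m.email}"))
}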
tumsgis/veganmentor
src/test/scala/VeganMentorTest.scala
import junit.framework.TestCase import org.junit.Assert._ import org.junit.{Before, Test} import MongoDbRepo._ class VeganMentorTest extends TestCase{ @Before override def setUp(): Unit = dropAllParticipants() @Test def testProcessing01(): Unit = { val testFile = "src/test/files/Vegan_mentor_test_01.csv" val parsedInput = Parser.parseInputFile(testFile) val sortedInput = VeganMentor.makeSortedQueue(parsedInput._1, parsedInput._2) val pairingResult = VeganMentor.pairParticipants(sortedInput.mentors, sortedInput.mentees) val pairingReport = VeganMentor.makePairingReport(pairingResult) val pairedMentors = pairingReport.pairedMentors val pairedMentees = pairingReport.pairedMentees val mentorsWaiting = pairingReport.mentorsWaiting val menteesWaiting = pairingReport.menteesWaiting val nonApproved = pairingReport.nonApproved pairedMentors.foreach(JsonMapping.printMentor) pairedMentees.foreach(JsonMapping.printMentee) mentorsWaiting.foreach(JsonMapping.printMentor) menteesWaiting.foreach(JsonMapping.printMentee) nonApproved.mentors.foreach(JsonMapping.printMentor) nonApproved.mentees.foreach(JsonMapping.printMentee) assertEquals(3, pairedMentors.size) assertEquals(3, pairedMentees.size) assertEquals(0, mentorsWaiting.size) assertEquals(2, menteesWaiting.size) assertEquals(0, nonApproved.mentors.size) assertEquals(0, nonApproved.mentees.size) assertEquals(1, pairedMentors.head.mentees.size) assertEquals(1, pairedMentors(1).mentees.size) assertEquals(1, pairedMentors(2).mentees.size) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors.head.email} ${pairedMentors.head.mentees.head.email}" ) assertEquals( "hekk<EMAIL> <EMAIL>", s"${pairedMentors(1).email} ${pairedMentors(1).mentees.head.email}" ) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors(2).email} ${pairedMentors(2).mentees.head.email}" ) assertEquals( "<EMAIL>", s"${menteesWaiting.head.email}" ) assertEquals( "<EMAIL>", s"${menteesWaiting(1).email}" ) } @Test def testProcessing02 = { val testFile = "src/test/files/Vegan_mentor_test_02.csv" val parsedInput = Parser.parseInputFile(testFile) val sortedInput = VeganMentor.makeSortedQueue(parsedInput._1, parsedInput._2) val pairingResult = VeganMentor.pairParticipants(sortedInput.mentors, sortedInput.mentees) val pairingReport = VeganMentor.makePairingReport(pairingResult) val pairedMentors = pairingReport.pairedMentors val pairedMentees = pairingReport.pairedMentees val mentorsWaiting = pairingReport.mentorsWaiting val menteesWaiting = pairingReport.menteesWaiting val nonApproved = pairingReport.nonApproved pairedMentors.foreach(JsonMapping.printMentor) pairedMentees.foreach(JsonMapping.printMentee) mentorsWaiting.foreach(JsonMapping.printMentor) menteesWaiting.foreach(JsonMapping.printMentee) nonApproved.mentors.foreach(JsonMapping.printMentor) nonApproved.mentees.foreach(JsonMapping.printMentee) assertEquals(1, pairedMentors.size) assertEquals(1, pairedMentees.size) assertEquals(2, mentorsWaiting.size) assertEquals(0, menteesWaiting.size) assertEquals(0, nonApproved.mentors.size) assertEquals(0, nonApproved.mentees.size) assertEquals(1, pairedMentors.head.mentees.size) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors.head.email} ${pairedMentors.head.mentees.head.email}" ) assertEquals( "<EMAIL>", s"${mentorsWaiting.head.email}" ) assertEquals( "<EMAIL>", s"${mentorsWaiting(1).email}" ) } /** Scenario having participants that did not approve of terms and conditions */ @Test def testProcessing03 = { val testFile = "src/test/files/Vegan_mentor_test_03.csv" val parsedInput = 
Parser.parseInputFile(testFile) val sortedInput = VeganMentor.makeSortedQueue(parsedInput._1, parsedInput._2) val pairingResult = VeganMentor.pairParticipants(sortedInput.mentors, sortedInput.mentees) val pairingReport = VeganMentor.makePairingReport(pairingResult) val pairedMentors = pairingReport.pairedMentors val pairedMentees = pairingReport.pairedMentees val mentorsWaiting = pairingReport.mentorsWaiting val menteesWaiting = pairingReport.menteesWaiting val nonApproved = pairingReport.nonApproved pairedMentors.foreach(JsonMapping.printMentor) pairedMentees.foreach(JsonMapping.printMentee) mentorsWaiting.foreach(JsonMapping.printMentor) menteesWaiting.foreach(JsonMapping.printMentee) nonApproved.mentors.foreach(JsonMapping.printMentor) nonApproved.mentees.foreach(JsonMapping.printMentee) assertEquals(2, pairedMentors.size) assertEquals(5, pairedMentees.size) assertEquals(0, mentorsWaiting.size) assertEquals(0, menteesWaiting.size) assertEquals(2, nonApproved.mentors.size) assertEquals(1, nonApproved.mentees.size) assertEquals(3, pairedMentors.head.mentees.size) assertEquals(2, pairedMentors(1).mentees.size) assertEquals( "<EMAIL> gri<EMAIL>", s"${pairedMentors.head.email} ${pairedMentors.head.mentees.head.email}" ) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors(1).email} ${pairedMentors(1).mentees.head.email}" ) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors.head.email} ${pairedMentors.head.mentees(1).email}" ) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors(1).email} ${pairedMentors(1).mentees(1).email}" ) assertEquals( "<EMAIL> <EMAIL>", s"${pairedMentors.head.email} ${pairedMentors.head.mentees(2).email}" ) assertEquals( "<EMAIL> <EMAIL> <EMAIL>", s"${nonApproved.mentees.head.email} ${nonApproved.mentors.head.email} ${nonApproved.mentors(1).email}" ) } }
tumsgis/veganmentor
src/main/scala/Parser.scala
import java.io.File

import DataStructure.{Mentee, Mentor, SortedQueue, Participant}
import com.github.tototoshi.csv.CSVReader

object Parser {

  def parseInputFile(filePath: String): (Seq[Mentor], Seq[Mentee]) = {
    val reader = CSVReader.open(new File(filePath))

    val participants: Seq[Participant] = reader.all.tail.map(o =>
      if (DataStructure.mentorShipMap(o(3)))
        new Mentor(Util.parseDate(o.head), o(1), o(2), o(5), o(6).trim.nonEmpty, o(4).toInt)
      else
        new Mentee(Util.parseDate(o.head), o(1), o(2), o(5), o(6).trim.nonEmpty)
    )

    val (mentorsUnordered: Seq[Mentor], menteesUnOrdered: Seq[Mentee]) =
      participants partition { _.isInstanceOf[Mentor] }

    (mentorsUnordered, menteesUnOrdered)
  }
}
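/* A sketch of the column layout parseInputFile expects, derived from the indices used above:
 * column 0 timestamp, 1 email, 2 name, 3 "Mentor"/"Lærlingur", 4 approved slots, 5 note, and
 * 6 the terms-and-conditions answer (any non-blank text counts as approval). The sample row is
 * invented, and the timestamp is substituted with LocalDateTime.now to sidestep date parsing. */
import java.time.LocalDateTime

import DataStructure.{Mentee, Mentor, Participant}

object RowMappingSketch extends App {
  val row = List("<timestamp>", "mentor@example.org", "Some Mentor", "Mentor", "2", "note", "yes")

  val participant: Participant =
    if (DataStructure.mentorShipMap(row(3)))
      new Mentor(LocalDateTime.now, row(1), row(2), row(5), row(6).trim.nonEmpty, row(4).toInt)
    else
      new Mentee(LocalDateTime.now, row(1), row(2), row(5), row(6).trim.nonEmpty)

  println(participant)
}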
tumsgis/veganmentor
src/main/scala/Util.scala
import java.time.{LocalDateTime, ZoneId}
import java.time.format.DateTimeFormatter
import java.util.Date

object Util {

  private val formatter = DateTimeFormatter.ofPattern("yyyy/MM/dd hh:mm:ss a z")
  private val formatterWithSimplerHourFormat = DateTimeFormatter.ofPattern("yyyy/MM/dd h:mm:ss a z")

  private def getDateFormatter(dateInput: String) =
    if (dateInput.split(" ")(1).split(":")(0).toInt < 10) formatterWithSimplerHourFormat
    else formatter

  def parseDate(dateInput: String): LocalDateTime =
    LocalDateTime.parse(dateInput, getDateFormatter(dateInput))

  def toJavaDate(ldt: LocalDateTime): Date =
    Date.from(ldt.atZone(ZoneId.systemDefault).toInstant)

  def fromJavaDate(javaDate: java.util.Date): LocalDateTime =
    javaDate.toInstant.atZone(ZoneId.systemDefault).toLocalDateTime
}
tumsgis/veganmentor
src/main/scala/DataStructure.scala
import java.time.LocalDateTime

import com.mongodb.casbah.Imports

object DataStructure {

  val mentorShipMap = Map("Mentor" -> true, "Lærlingur" -> false)

  sealed trait Participant {
    def timestamp: LocalDateTime
    def email: String
    def name: String
    def note: String
    def approvedTermsAndConditions: Boolean
  }

  case class Mentor(id: Option[Imports.ObjectId],
                    timestamp: LocalDateTime,
                    email: String,
                    name: String,
                    note: String,
                    approvedTermsAndConditions: Boolean,
                    approvedSlots: Int,
                    emptySlots: Int,
                    mentees: List[Mentee] = List()) extends Participant {

    def this(timestamp: LocalDateTime, email: String, name: String, note: String,
             approvedTermsAndConditions: Boolean, approvedSlots: Int) =
      this(None, timestamp, email, name, note, approvedTermsAndConditions, approvedSlots, approvedSlots)

    def assignMentee(mentee: Mentee): Mentor = copy(mentees = mentee :: mentees)
  }

  case class Mentee(id: Option[Imports.ObjectId],
                    timestamp: LocalDateTime,
                    email: String,
                    name: String,
                    note: String,
                    approvedTermsAndConditions: Boolean,
                    mentorId: Option[Imports.ObjectId],
                    mentor: Option[Mentor] = None) extends Participant {

    def this(timestamp: LocalDateTime, email: String, name: String, note: String,
             approvedTermsAndConditions: Boolean) =
      this(None, timestamp, email, name, note, approvedTermsAndConditions, None)

    def assignMentor(mentor: Mentor): Mentee = copy(mentor = Some(mentor))
  }

  case class SortedQueue(mentors: Seq[Mentor], mentees: Seq[Mentee])
  case class PairedParticipants(mentors: Seq[Mentor], menteesWaitingList: Option[Seq[Mentee]])
  case class NonApproved(mentors: Seq[Mentor], mentees: Seq[Mentee])
  case class PairingResult(pairedParticipants: PairedParticipants, nonApproved: NonApproved)
  case class PairingReport(pairedMentors: Seq[Mentor],
                           pairedMentees: Seq[Mentee],
                           mentorsWaiting: Seq[Mentor],
                           menteesWaiting: Seq[Mentee],
                           nonApproved: NonApproved)
}
tumsgis/veganmentor
build.sbt
name := "VeganMentor" version := "0.1" scalaVersion := "2.12.4" libraryDependencies += "com.github.tototoshi" %% "scala-csv" % "1.3.5" libraryDependencies += "junit" % "junit" % "4.12" % Test libraryDependencies += "com.google.apis" % "google-api-services-gmail" % "v1-rev75-1.23.0" libraryDependencies += "com.google.api-client" % "google-api-client" % "1.23.0" libraryDependencies += "com.google.oauth-client" % "google-oauth-client-jetty" % "1.23.0" libraryDependencies += "javax.mail" % "javax.mail-api" % "1.6.0" libraryDependencies += "com.sun.mail" % "javax.mail" % "1.6.0" libraryDependencies += "net.liftweb" %% "lift-json" % "3.2.0-M3" libraryDependencies += "org.mongodb.scala" %% "mongo-scala-driver" % "2.2.0" libraryDependencies += "org.mongodb" %% "casbah" % "3.1.1" libraryDependencies += "org.slf4j" % "slf4j-simple" % "1.6.4"